diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs index d4140dc6c5ba..a90612ab5780 100644 --- a/Documentation/ABI/testing/sysfs-driver-ufs +++ b/Documentation/ABI/testing/sysfs-driver-ufs @@ -711,7 +711,7 @@ Description: This file shows the thin provisioning type. This is one of The file is read only. -What: /sys/class/scsi_device/*/device/unit_descriptor/physical_memory_resourse_count +What: /sys/class/scsi_device/*/device/unit_descriptor/physical_memory_resource_count Date: February 2018 Contact: Stanislav Nijnikov Description: This file shows the total physical memory resources. This is @@ -1685,3 +1685,86 @@ Description: ================ ======================================== The file is read only. + +What: /sys/bus/platform/drivers/ufshcd/*/hid/analysis_trigger +What: /sys/bus/platform/devices/*.ufs/hid/analysis_trigger +Date: May 2025 +Contact: Huan Tang +Description: + The host can enable or disable HID analysis operation. + + ======= ========================================= + disable disable HID analysis operation + enable enable HID analysis operation + ======= ========================================= + + The attribute is write only. + +What: /sys/bus/platform/drivers/ufshcd/*/hid/defrag_trigger +What: /sys/bus/platform/devices/*.ufs/hid/defrag_trigger +Date: May 2025 +Contact: Huan Tang +Description: + The host can enable or disable HID defragmentation operation. + + ======= ========================================= + disable disable HID defragmentation operation + enable enable HID defragmentation operation + ======= ========================================= + + The attribute is write only. + +What: /sys/bus/platform/drivers/ufshcd/*/hid/fragmented_size +What: /sys/bus/platform/devices/*.ufs/hid/fragmented_size +Date: May 2025 +Contact: Huan Tang +Description: + The total fragmented size in the device is reported through + this attribute. + + The attribute is read only. + +What: /sys/bus/platform/drivers/ufshcd/*/hid/defrag_size +What: /sys/bus/platform/devices/*.ufs/hid/defrag_size +Date: May 2025 +Contact: Huan Tang +Description: + The host sets the size to be defragmented by an HID + defragmentation operation. + + The attribute is read/write. + +What: /sys/bus/platform/drivers/ufshcd/*/hid/progress_ratio +What: /sys/bus/platform/devices/*.ufs/hid/progress_ratio +Date: May 2025 +Contact: Huan Tang +Description: + Defragmentation progress is reported by this attribute, + which indicates the ratio of the completed defragmentation size + over the requested defragmentation size. + + ==== ============================================ + 1 1% + ... + 100 100% + ==== ============================================ + + The attribute is read only. + +What: /sys/bus/platform/drivers/ufshcd/*/hid/state +What: /sys/bus/platform/devices/*.ufs/hid/state +Date: May 2025 +Contact: Huan Tang +Description: + The HID state is reported by this attribute. + + ==================== =========================== + idle Idle (analysis required) + analysis_in_progress Analysis in progress + defrag_required Defrag required + defrag_in_progress Defrag in progress + defrag_completed Defrag completed + defrag_not_required Defrag is not required + ==================== =========================== + + The attribute is read only.
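As a rough usage illustration of the HID attributes documented above (not part of the ABI file itself), a userspace tool could drive an analysis/defragmentation cycle along the following lines. The device instance name, the helper functions and the defrag_size value are placeholders, and a real tool would poll the state attribute instead of reading it once::

    #include <stdio.h>
    #include <string.h>

    /* Placeholder device instance; real paths match *.ufs above. */
    #define HID_DIR "/sys/bus/platform/devices/112b0000.ufs/hid/"

    static int hid_write(const char *attr, const char *val)
    {
            char path[256];
            FILE *f;

            snprintf(path, sizeof(path), HID_DIR "%s", attr);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    static int hid_read(const char *attr, char *buf, size_t len)
    {
            char path[256];
            FILE *f;

            snprintf(path, sizeof(path), HID_DIR "%s", attr);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            if (!fgets(buf, len, f))
                    buf[0] = '\0';
            return fclose(f);
    }

    int main(void)
    {
            char buf[64];

            hid_write("analysis_trigger", "enable");   /* start analysis */
            hid_read("state", buf, sizeof(buf));       /* poll until it leaves
                                                          analysis_in_progress */
            if (strstr(buf, "defrag_required")) {
                    hid_write("defrag_size", "1048576");   /* placeholder value;
                                                              units are not stated
                                                              in the ABI text */
                    hid_write("defrag_trigger", "enable"); /* start defrag */
            }
            hid_read("progress_ratio", buf, sizeof(buf));  /* "1".."100" */
            return 0;
    }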
diff --git a/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml b/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml index 32fd535a514a..1dec54fb00f3 100644 --- a/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml +++ b/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml @@ -9,21 +9,20 @@ title: Mediatek Universal Flash Storage (UFS) Controller maintainers: - Stanley Chu -allOf: - - $ref: ufs-common.yaml - properties: compatible: enum: - mediatek,mt8183-ufshci - mediatek,mt8192-ufshci + - mediatek,mt8195-ufshci clocks: - maxItems: 1 + minItems: 1 + maxItems: 8 clock-names: - items: - - const: ufs + minItems: 1 + maxItems: 8 phys: maxItems: 1 @@ -33,6 +32,10 @@ properties: vcc-supply: true + mediatek,ufs-disable-mcq: + $ref: /schemas/types.yaml#/definitions/flag + description: The mask to disable MCQ (Multi-Circular Queue) for UFS host. + required: - compatible - clocks @@ -43,6 +46,37 @@ required: unevaluatedProperties: false +allOf: + - $ref: ufs-common.yaml + + - if: + properties: + compatible: + contains: + enum: + - mediatek,mt8195-ufshci + then: + properties: + clocks: + minItems: 8 + clock-names: + items: + - const: ufs + - const: ufs_aes + - const: ufs_tick + - const: unipro_sysclk + - const: unipro_tick + - const: unipro_mp_bclk + - const: ufs_tx_symbol + - const: ufs_mem_sub + else: + properties: + clocks: + maxItems: 1 + clock-names: + items: + - const: ufs + examples: - | #include diff --git a/Documentation/scsi/scsi_fc_transport.rst b/Documentation/scsi/scsi_fc_transport.rst index e3ddcfb7f8fd..5ef75575924e 100644 --- a/Documentation/scsi/scsi_fc_transport.rst +++ b/Documentation/scsi/scsi_fc_transport.rst @@ -30,7 +30,40 @@ This file is found at Documentation/scsi/scsi_fc_transport.rst FC Remote Ports (rports) ======================== -<< To Be Supplied >> + + In the Fibre Channel (FC) subsystem, a remote port (rport) refers to a + remote Fibre Channel node that the local port can communicate with. + These are typically storage targets (e.g., arrays, tapes) that respond + to SCSI commands over FC transport. + + In Linux, rports are managed by the FC transport class and are + represented in sysfs under: + + /sys/class/fc_remote_ports/ + + Each rport directory contains attributes describing the remote port, + such as port ID, node name, port state, and link speed. + + rports are typically created by the FC transport when a new device is + discovered during a fabric login or scan, and they persist until the + device is removed or the link is lost. + + Common attributes: + - node_name: World Wide Node Name (WWNN). + - port_name: World Wide Port Name (WWPN). + - port_id: FC address of the remote port. + - roles: Indicates if the port is an initiator, target, or both. + - port_state: Shows the current operational state. + + After discovering a remote port, the driver typically populates a + fc_rport_identifiers structure and invokes fc_remote_port_add() to + create and register the remote port with the SCSI subsystem via the + Fibre Channel (FC) transport class. + + rports are also visible via sysfs as children of the FC host adapter. + + For developers: use fc_remote_port_add() and fc_remote_port_delete() when + implementing a driver that interacts with the FC transport class. 
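To make the last paragraph concrete, here is a minimal, hypothetical sketch (not taken from any driver in this patch) of how a low-level driver might register a discovered target with the FC transport class; the helper name and the placeholder channel and role values are assumptions::

    #include <linux/types.h>
    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_fc.h>

    /* Hypothetical helper, called once PLOGI/PRLI with the remote node
     * has completed and its WWNN/WWPN and FC address (D_ID) are known.
     */
    static void example_register_rport(struct Scsi_Host *shost,
                                       u64 wwnn, u64 wwpn, u32 port_id)
    {
            struct fc_rport_identifiers ids = {
                    .node_name = wwnn,              /* WWNN attribute */
                    .port_name = wwpn,              /* WWPN attribute */
                    .port_id   = port_id,           /* FC address     */
                    .roles     = FC_RPORT_ROLE_UNKNOWN,
            };
            struct fc_rport *rport;

            /* Creates the fc_remote_ports/rport-H:B-R sysfs object and
             * registers it as a child of the FC host adapter.
             */
            rport = fc_remote_port_add(shost, 0, &ids);
            if (!rport)
                    return;

            /* Report the port as an FCP target so the midlayer scans it;
             * this is what updates the "roles" attribute.
             */
            fc_remote_port_rolechg(rport, FC_RPORT_ROLE_FCP_TARGET);
    }

  When the device is later removed, or the link stays down past dev_loss_tmo,
  the driver would call fc_remote_port_delete() on the same rport, matching
  the add/delete pairing described above.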
FC Virtual Ports (vports) diff --git a/MAINTAINERS b/MAINTAINERS index a92290fffa16..05325fab7a6b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10949,7 +10949,7 @@ F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt F: drivers/infiniband/hw/hns/ HISILICON SAS Controller -M: Yihang Li +M: Yihang Li S: Supported W: http://www.hisilicon.com F: Documentation/devicetree/bindings/scsi/hisilicon-sas.txt diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi index dd065b1bf94a..8877953ce292 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi @@ -1430,6 +1430,31 @@ mmc2: mmc@11250000 { status = "disabled"; }; + ufshci: ufshci@11270000 { + compatible = "mediatek,mt8195-ufshci"; + reg = <0 0x11270000 0 0x2300>; + interrupts = ; + phys = <&ufsphy>; + clocks = <&infracfg_ao CLK_INFRA_AO_AES_UFSFDE>, + <&infracfg_ao CLK_INFRA_AO_AES>, + <&infracfg_ao CLK_INFRA_AO_UFS_TICK>, + <&infracfg_ao CLK_INFRA_AO_UNIPRO_SYS>, + <&infracfg_ao CLK_INFRA_AO_UNIPRO_TICK>, + <&infracfg_ao CLK_INFRA_AO_UFS_MP_SAP_B>, + <&infracfg_ao CLK_INFRA_AO_UFS_TX_SYMBOL>, + <&infracfg_ao CLK_INFRA_AO_PERI_UFS_MEM_SUB>; + clock-names = "ufs", "ufs_aes", "ufs_tick", + "unipro_sysclk", "unipro_tick", + "unipro_mp_bclk", "ufs_tx_symbol", + "ufs_mem_sub"; + freq-table-hz = <0 0>, <0 0>, <0 0>, + <0 0>, <0 0>, <0 0>, + <0 0>, <0 0>; + + mediatek,ufs-disable-mcq; + status = "disabled"; + }; + lvts_mcu: thermal-sensor@11278000 { compatible = "mediatek,mt8195-lvts-mcu"; reg = <0 0x11278000 0 0x1000>; diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 1378651735f6..23ed2fc688f0 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -3705,9 +3705,10 @@ static ssize_t add_target_store(struct device *dev, target_host->max_id = 1; target_host->max_lun = -1LL; target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; - target_host->max_segment_size = ib_dma_max_seg_size(ibdev); - if (!(ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)) + if (ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) + target_host->max_segment_size = ib_dma_max_seg_size(ibdev); + else target_host->virt_boundary_mask = ~srp_dev->mr_page_mask; target = host_to_target(target_host); diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 28cf18955a08..726c8531b7d3 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -481,8 +481,7 @@ void aac_define_int_mode(struct aac_dev *dev) pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { min_msix = 2; i = pci_alloc_irq_vectors(dev->pdev, - min_msix, msi_count, - PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + min_msix, msi_count, PCI_IRQ_MSIX); if (i > 0) { dev->msi_enabled = 1; msi_count = i; diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index a719a18f0fbc..f56e008ee52b 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -706,6 +706,7 @@ bfad_im_probe(struct bfad_s *bfad) if (bfad_thread_workq(bfad) != BFA_STATUS_OK) { kfree(im); + bfad->im = NULL; return BFA_STATUS_FAILED; } diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c index 9ac69356b13e..bd3d489e56ae 100644 --- a/drivers/scsi/elx/efct/efct_lio.c +++ b/drivers/scsi/elx/efct/efct_lio.c @@ -382,7 +382,7 @@ efct_lio_sg_unmap(struct efct_io *io) return; dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg, - ocp->seg_map_cnt, cmd->data_direction); + 
cmd->t_data_nents, cmd->data_direction); ocp->seg_map_cnt = 0; } diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index b911fdb387f3..4912087de10d 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1299,26 +1299,6 @@ static void fcoe_thread_cleanup_local(unsigned int cpu) flush_work(&p->work); } -/** - * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming - * command. - * - * This routine selects next CPU based on cpumask to distribute - * incoming requests in round robin. - * - * Returns: int CPU number - */ -static inline unsigned int fcoe_select_cpu(void) -{ - static unsigned int selected_cpu; - - selected_cpu = cpumask_next(selected_cpu, cpu_online_mask); - if (selected_cpu >= nr_cpu_ids) - selected_cpu = cpumask_first(cpu_online_mask); - - return selected_cpu; -} - /** * fcoe_rcv() - Receive packets from a net device * @skb: The received packet @@ -1405,7 +1385,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask; else { if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN) - cpu = fcoe_select_cpu(); + cpu = skb->alloc_cpu; else cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask; } diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c index f8ab69c51dab..ae37f85f618b 100644 --- a/drivers/scsi/fnic/fdls_disc.c +++ b/drivers/scsi/fnic/fdls_disc.c @@ -763,50 +763,86 @@ static void fdls_send_fabric_abts(struct fnic_iport_s *iport) iport->fabric.timer_pending = 1; } -static void fdls_send_fdmi_abts(struct fnic_iport_s *iport) +static uint8_t *fdls_alloc_init_fdmi_abts_frame(struct fnic_iport_s *iport, + uint16_t oxid) { - uint8_t *frame; + struct fc_frame_header *pfdmi_abts; uint8_t d_id[3]; + uint8_t *frame; struct fnic *fnic = iport->fnic; - struct fc_frame_header *pfabric_abts; - unsigned long fdmi_tov; - uint16_t oxid; - uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + - sizeof(struct fc_frame_header); frame = fdls_alloc_frame(iport); if (frame == NULL) { FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "Failed to allocate frame to send FDMI ABTS"); - return; + return NULL; } - pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + pfdmi_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); fdls_init_fabric_abts_frame(frame, iport); hton24(d_id, FC_FID_MGMT_SERV); - FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + FNIC_STD_SET_D_ID(*pfdmi_abts, d_id); + FNIC_STD_SET_OX_ID(*pfdmi_abts, oxid); + + return frame; +} + +static void fdls_send_fdmi_abts(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fnic *fnic = iport->fnic; + unsigned long fdmi_tov; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_frame_header); if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) { - oxid = iport->active_oxid_fdmi_plogi; - FNIC_STD_SET_OX_ID(*pfabric_abts, oxid); + frame = fdls_alloc_init_fdmi_abts_frame(iport, + iport->active_oxid_fdmi_plogi); + if (frame == NULL) + return; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI PLOGI abts. 
iport->fabric.state: %d oxid: 0x%x", + iport->fcid, iport->fabric.state, iport->active_oxid_fdmi_plogi); fnic_send_fcoe_frame(iport, frame, frame_size); } else { if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) { - oxid = iport->active_oxid_fdmi_rhba; - FNIC_STD_SET_OX_ID(*pfabric_abts, oxid); + frame = fdls_alloc_init_fdmi_abts_frame(iport, + iport->active_oxid_fdmi_rhba); + if (frame == NULL) + return; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI RHBA abts. iport->fabric.state: %d oxid: 0x%x", + iport->fcid, iport->fabric.state, iport->active_oxid_fdmi_rhba); fnic_send_fcoe_frame(iport, frame, frame_size); } if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) { - oxid = iport->active_oxid_fdmi_rpa; - FNIC_STD_SET_OX_ID(*pfabric_abts, oxid); + frame = fdls_alloc_init_fdmi_abts_frame(iport, + iport->active_oxid_fdmi_rpa); + if (frame == NULL) { + if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) + goto arm_timer; + else + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI RPA abts. iport->fabric.state: %d oxid: 0x%x", + iport->fcid, iport->fabric.state, iport->active_oxid_fdmi_rpa); fnic_send_fcoe_frame(iport, frame, frame_size); } } +arm_timer: fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov); mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov)); iport->fabric.fdmi_pending |= FDLS_FDMI_ABORT_PENDING; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); } static void fdls_send_fabric_flogi(struct fnic_iport_s *iport) @@ -2245,6 +2281,21 @@ void fdls_fabric_timer_callback(struct timer_list *t) spin_unlock_irqrestore(&fnic->fnic_lock, flags); } +void fdls_fdmi_retry_plogi(struct fnic_iport_s *iport) +{ + struct fnic *fnic = iport->fnic; + + iport->fabric.fdmi_pending = 0; + /* If max retries not exhausted, start over from fdmi plogi */ + if (iport->fabric.fdmi_retry < FDLS_FDMI_MAX_RETRY) { + iport->fabric.fdmi_retry++; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Retry FDMI PLOGI. FDMI retry: %d", + iport->fabric.fdmi_retry); + fdls_send_fdmi_plogi(iport); + } +} + void fdls_fdmi_timer_callback(struct timer_list *t) { struct fnic_fdls_fabric_s *fabric = timer_container_of(fabric, t, @@ -2257,7 +2308,7 @@ void fdls_fdmi_timer_callback(struct timer_list *t) spin_lock_irqsave(&fnic->fnic_lock, flags); FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, - "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending); + "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending); if (!iport->fabric.fdmi_pending) { /* timer expired after fdmi responses received. */ @@ -2265,7 +2316,7 @@ void fdls_fdmi_timer_callback(struct timer_list *t) return; } FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, - "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending); + "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending); /* if not abort pending, send an abort */ if (!(iport->fabric.fdmi_pending & FDLS_FDMI_ABORT_PENDING)) { @@ -2274,33 +2325,37 @@ void fdls_fdmi_timer_callback(struct timer_list *t) return; } FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, - "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending); + "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending); /* ABTS pending for an active fdmi request that is pending. 
* That means FDMI ABTS timed out * Schedule to free the OXID after 2*r_a_tov and proceed */ if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDMI PLOGI ABTS timed out. Schedule oxid free: 0x%x\n", + iport->active_oxid_fdmi_plogi); fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_plogi); } else { - if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) + if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDMI RHBA ABTS timed out. Schedule oxid free: 0x%x\n", + iport->active_oxid_fdmi_rhba); fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rhba); - if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) + } + if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDMI RPA ABTS timed out. Schedule oxid free: 0x%x\n", + iport->active_oxid_fdmi_rpa); fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rpa); + } } FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, - "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending); + "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending); - iport->fabric.fdmi_pending = 0; - /* If max retries not exhaused, start over from fdmi plogi */ - if (iport->fabric.fdmi_retry < FDLS_FDMI_MAX_RETRY) { - iport->fabric.fdmi_retry++; - FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, - "retry fdmi timer %d", iport->fabric.fdmi_retry); - fdls_send_fdmi_plogi(iport); - } + fdls_fdmi_retry_plogi(iport); FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, - "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending); + "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending); spin_unlock_irqrestore(&fnic->fnic_lock, flags); } @@ -3715,13 +3770,60 @@ static void fdls_process_fdmi_abts_rsp(struct fnic_iport_s *iport, switch (FNIC_FRAME_TYPE(oxid)) { case FNIC_FRAME_TYPE_FDMI_PLOGI: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received FDMI PLOGI ABTS rsp with oxid: 0x%x", oxid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi); + + iport->fabric.fdmi_pending &= ~FDLS_FDMI_PLOGI_PENDING; + iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); break; case FNIC_FRAME_TYPE_FDMI_RHBA: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received FDMI RHBA ABTS rsp with oxid: 0x%x", oxid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); + + iport->fabric.fdmi_pending &= ~FDLS_FDMI_REG_HBA_PENDING; + + /* If RPA is still pending, don't turn off ABORT PENDING. + * We count on the timer to detect the ABTS timeout and take + * corrective action. 
+ */ + if (!(iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING)) + iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING; + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); break; case FNIC_FRAME_TYPE_FDMI_RPA: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received FDMI RPA ABTS rsp with oxid: 0x%x", oxid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); + + iport->fabric.fdmi_pending &= ~FDLS_FDMI_RPA_PENDING; + + /* If RHBA is still pending, don't turn off ABORT PENDING. + * We count on the timer to detect the ABTS timeout and take + * corrective action. + */ + if (!(iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING)) + iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING; + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); break; default: FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, @@ -3730,10 +3832,16 @@ static void fdls_process_fdmi_abts_rsp(struct fnic_iport_s *iport, break; } - timer_delete_sync(&iport->fabric.fdmi_timer); - iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING; - - fdls_send_fdmi_plogi(iport); + /* + * Only if ABORT PENDING is off, delete the timer, and if no other + * operations are pending, retry FDMI. + * Otherwise, let the timer pop and take the appropriate action. + */ + if (!(iport->fabric.fdmi_pending & FDLS_FDMI_ABORT_PENDING)) { + timer_delete_sync(&iport->fabric.fdmi_timer); + if (!iport->fabric.fdmi_pending) + fdls_fdmi_retry_plogi(iport); + } } static void @@ -4972,9 +5080,12 @@ void fnic_fdls_link_down(struct fnic_iport_s *iport) fdls_delete_tport(iport, tport); } - if ((fnic_fdmi_support == 1) && (iport->fabric.fdmi_pending > 0)) { - timer_delete_sync(&iport->fabric.fdmi_timer); - iport->fabric.fdmi_pending = 0; + if (fnic_fdmi_support == 1) { + if (iport->fabric.fdmi_pending > 0) { + timer_delete_sync(&iport->fabric.fdmi_timer); + iport->fabric.fdmi_pending = 0; + } + iport->flags &= ~FNIC_FDMI_ACTIVE; } FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 6c5f6046b1f5..c2fdc6553e62 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -30,7 +30,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.8.0.0" +#define DRV_VERSION "1.8.0.2" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 1e8cd64f9a5c..103ab6f1f7cd 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -636,6 +636,8 @@ static int fnic_send_frame(struct fnic *fnic, void *frame, int frame_len) unsigned long flags; pa = dma_map_single(&fnic->pdev->dev, frame, frame_len, DMA_TO_DEVICE); + if (dma_mapping_error(&fnic->pdev->dev, pa)) + return -ENOMEM; if ((fnic_fc_trace_set_data(fnic->fnic_num, FNIC_FC_SEND | 0x80, (char *) frame, diff --git a/drivers/scsi/fnic/fnic_fdls.h b/drivers/scsi/fnic/fnic_fdls.h index 8e610b65ad57..531d0b37e450 100644 --- a/drivers/scsi/fnic/fnic_fdls.h +++ b/drivers/scsi/fnic/fnic_fdls.h @@ -394,6 +394,7 @@ void fdls_send_tport_abts(struct fnic_iport_s *iport, bool fdls_delete_tport(struct fnic_iport_s 
*iport, struct fnic_tport_s *tport); void fdls_fdmi_timer_callback(struct timer_list *t); +void fdls_fdmi_retry_plogi(struct fnic_iport_s *iport); /* fnic_fcs.c */ void fnic_fdls_init(struct fnic *fnic, int usefip); diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 7133b254cbe4..75b29a018d1f 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -1046,7 +1046,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) atomic64_inc(&fnic_stats->misc_stats.queue_fulls); - FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "xfer_len: %llu", xfer_len); break; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 24cd172905f3..4431698a5d78 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -2401,7 +2401,7 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba, slot_err_v2_hw(hisi_hba, task, slot, 2); if (ts->stat != SAS_DATA_UNDERRUN) - dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", + dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", slot->idx, task, sas_dev->device_id, complete_hdr->dw0, complete_hdr->dw1, complete_hdr->act, complete_hdr->dw3, @@ -2467,7 +2467,7 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba, spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { spin_unlock_irqrestore(&task->task_state_lock, flags); - dev_info(dev, "slot complete: task(%pK) aborted\n", task); + dev_info(dev, "slot complete: task(%p) aborted\n", task); return; } task->task_state_flags |= SAS_TASK_STATE_DONE; @@ -2478,7 +2478,7 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba, spin_lock_irqsave(&device->done_lock, flags); if (test_bit(SAS_HA_FROZEN, &ha->state)) { spin_unlock_irqrestore(&device->done_lock, flags); - dev_info(dev, "slot complete: task(%pK) ignored\n", + dev_info(dev, "slot complete: task(%p) ignored\n", task); return; } diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index bc5d5356dd00..2f3d61abab3a 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -2409,7 +2409,7 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba, if (slot_err_v3_hw(hisi_hba, task, slot)) { if (ts->stat != SAS_DATA_UNDERRUN) - dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d addr=%016llx CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", + dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d addr=%016llx CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", slot->idx, task, sas_dev->device_id, SAS_ADDR(device->sas_addr), dw0, dw1, complete_hdr->act, dw3, @@ -2470,7 +2470,7 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba, spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { spin_unlock_irqrestore(&task->task_state_lock, flags); - dev_info(dev, "slot complete: task(%pK) aborted\n", task); + dev_info(dev, "slot complete: task(%p) aborted\n", task); return; } task->task_state_flags |= SAS_TASK_STATE_DONE; @@ -2481,7 +2481,7 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba, 
spin_lock_irqsave(&device->done_lock, flags); if (test_bit(SAS_HA_FROZEN, &ha->state)) { spin_unlock_irqrestore(&device->done_lock, flags); - dev_info(dev, "slot complete: task(%pK) ignored\n", + dev_info(dev, "slot complete: task(%p) ignored\n", task); return; } diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index e021f1106bea..cc5d05dc395c 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -473,10 +473,17 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv else shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS; - if (sht->max_segment_size) - shost->max_segment_size = sht->max_segment_size; - else - shost->max_segment_size = BLK_MAX_SEGMENT_SIZE; + shost->virt_boundary_mask = sht->virt_boundary_mask; + if (shost->virt_boundary_mask) { + WARN_ON_ONCE(sht->max_segment_size && + sht->max_segment_size != UINT_MAX); + shost->max_segment_size = UINT_MAX; + } else { + if (sht->max_segment_size) + shost->max_segment_size = sht->max_segment_size; + else + shost->max_segment_size = BLK_MAX_SEGMENT_SIZE; + } /* 32-byte (dword) is a common minimum for HBAs. */ if (sht->dma_alignment) @@ -492,9 +499,6 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv else shost->dma_boundary = 0xffffffff; - if (sht->virt_boundary_mask) - shost->virt_boundary_mask = sht->virt_boundary_mask; - device_initialize(&shost->shost_gendev); dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); shost->shost_gendev.bus = &scsi_bus_type; diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 9e42230e42b8..5a3787f27369 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -425,7 +425,7 @@ static void ibmvscsis_disconnect(struct work_struct *work) /* * check which state we are in and see if we - * should transitition to the new state + * should transition to the new state */ switch (vscsi->state) { /* Should never be called while in this state. 
*/ diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.c b/drivers/scsi/ibmvscsi_tgt/libsrp.c index 8a0e28aec928..0ecad398ed3d 100644 --- a/drivers/scsi/ibmvscsi_tgt/libsrp.c +++ b/drivers/scsi/ibmvscsi_tgt/libsrp.c @@ -184,7 +184,8 @@ static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md, err = rdma_io(cmd, sg, nsg, md, 1, dir, len); if (dma_map) - dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL); + dma_unmap_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents, + DMA_BIDIRECTIONAL); return err; } @@ -256,7 +257,8 @@ static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd, err = rdma_io(cmd, sg, nsg, md, nmd, dir, len); if (dma_map) - dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL); + dma_unmap_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents, + DMA_BIDIRECTIONAL); free_mem: if (token && dma_map) { diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 355a0bc0828e..bb89a2e33eb4 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -2904,7 +2904,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost, task->total_xfer_len, task->data_dir); else /* unmap the sgl dma addresses */ dma_unmap_sg(&ihost->pdev->dev, task->scatter, - request->num_sg_entries, task->data_dir); + task->num_scatter, task->data_dir); break; case SAS_PROTOCOL_SMP: { struct scatterlist *sg = &task->smp_task.smp_req; diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 392d57e054db..c9f410c50978 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -3185,7 +3185,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, return NULL; conn = cls_conn->dd_data; - conn->dd_data = cls_conn->dd_data + sizeof(*conn); + if (dd_size) + conn->dd_data = cls_conn->dd_data + sizeof(*conn); conn->session = session; conn->cls_conn = cls_conn; conn->c_stage = ISCSI_CONN_INITIAL_STAGE; diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 7b4e7a61965a..cc093cdc9c69 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -252,7 +252,7 @@ static int sas_get_ata_command_set(struct domain_device *dev) return ata_dev_classify(&tf); } -int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) +static int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) { if (phy->attached_tproto & SAS_PROTOCOL_STP) dev->tproto = phy->attached_tproto; @@ -927,13 +927,7 @@ EXPORT_SYMBOL_GPL(sas_ata_schedule_reset); void sas_ata_wait_eh(struct domain_device *dev) { - struct ata_port *ap; - - if (!dev_is_sata(dev)) - return; - - ap = dev->sata_dev.ap; - ata_port_wait_eh(ap); + ata_port_wait_eh(dev->sata_dev.ap); } void sas_ata_device_link_abort(struct domain_device *device, bool force_reset) diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 951bdc554a10..b07062db50b2 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -406,7 +406,7 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev) } } -void sas_unregister_domain_devices(struct asd_sas_port *port, int gone) +void sas_unregister_domain_devices(struct asd_sas_port *port, bool gone) { struct domain_device *dev, *n; diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 03d6ec1eb970..6706f2be8d27 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -44,7 +44,7 @@ void sas_hash_addr(u8 
*hashed, const u8 *sas_addr); int sas_discover_root_expander(struct domain_device *dev); int sas_ex_revalidate_domain(struct domain_device *dev); -void sas_unregister_domain_devices(struct asd_sas_port *port, int gone); +void sas_unregister_domain_devices(struct asd_sas_port *port, bool gone); void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port); void sas_discover_event(struct asd_sas_port *port, enum discover_event ev); @@ -70,7 +70,7 @@ void sas_enable_revalidation(struct sas_ha_struct *ha); void sas_queue_deferred_work(struct sas_ha_struct *ha); void __sas_drain_work(struct sas_ha_struct *ha); -void sas_deform_port(struct asd_sas_phy *phy, int gone); +void sas_deform_port(struct asd_sas_phy *phy, bool gone); void sas_porte_bytes_dmaed(struct work_struct *work); void sas_porte_broadcast_rcvd(struct work_struct *work); @@ -222,4 +222,78 @@ static inline void sas_put_device(struct domain_device *dev) kref_put(&dev->kref, sas_free_device); } +#ifdef CONFIG_SCSI_SAS_ATA + +int sas_ata_init(struct domain_device *dev); +void sas_ata_task_abort(struct sas_task *task); +int sas_discover_sata(struct domain_device *dev); +int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy, + struct domain_device *child, int phy_id); +void sas_ata_strategy_handler(struct Scsi_Host *shost); +void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q); +void sas_ata_end_eh(struct ata_port *ap); +void sas_ata_wait_eh(struct domain_device *dev); +void sas_probe_sata(struct asd_sas_port *port); +void sas_suspend_sata(struct asd_sas_port *port); +void sas_resume_sata(struct asd_sas_port *port); + +#else + +static inline int sas_ata_init(struct domain_device *dev) +{ + return 0; +} + +static inline void sas_ata_task_abort(struct sas_task *task) +{ +} + +static inline void sas_ata_strategy_handler(struct Scsi_Host *shost) +{ +} + +static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q) +{ +} + +static inline void sas_ata_end_eh(struct ata_port *ap) +{ +} + +static inline void sas_ata_wait_eh(struct domain_device *dev) +{ +} + +static inline void sas_probe_sata(struct asd_sas_port *port) +{ +} + +static inline void sas_suspend_sata(struct asd_sas_port *port) +{ +} + +static inline void sas_resume_sata(struct asd_sas_port *port) +{ +} + +static inline void sas_ata_disabled_notice(void) +{ + pr_notice_once("ATA device seen but CONFIG_SCSI_SAS_ATA=N\n"); +} + +static inline int sas_discover_sata(struct domain_device *dev) +{ + sas_ata_disabled_notice(); + return -ENXIO; +} + +static inline int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy, + struct domain_device *child, int phy_id) +{ + sas_ata_disabled_notice(); + return -ENODEV; +} + +#endif + #endif /* _SAS_INTERNAL_H_ */ diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c index 57494ac97076..635835c28ecd 100644 --- a/drivers/scsi/libsas/sas_phy.c +++ b/drivers/scsi/libsas/sas_phy.c @@ -20,7 +20,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work) struct asd_sas_phy *phy = ev->phy; phy->error = 0; - sas_deform_port(phy, 1); + sas_deform_port(phy, true); } static void sas_phye_oob_done(struct work_struct *work) @@ -40,7 +40,7 @@ static void sas_phye_oob_error(struct work_struct *work) struct sas_internal *i = to_sas_internal(sas_ha->shost->transportt); - sas_deform_port(phy, 1); + sas_deform_port(phy, true); if (!port && phy->enabled && i->dft->lldd_control_phy) { phy->error++; @@ -85,7 +85,7 @@ static void sas_phye_resume_timeout(struct work_struct 
*work) phy->error = 0; phy->suspended = 0; - sas_deform_port(phy, 1); + sas_deform_port(phy, true); } diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index e3f2ed913419..de7556070048 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -113,7 +113,7 @@ static void sas_form_port(struct asd_sas_phy *phy) if (port) { if (!phy_is_wideport_member(port, phy)) - sas_deform_port(phy, 0); + sas_deform_port(phy, false); else if (phy->suspended) { phy->suspended = 0; sas_resume_port(phy); @@ -206,7 +206,7 @@ static void sas_form_port(struct asd_sas_phy *phy) * This is called when the physical link to the other phy has been * lost (on this phy), in Event thread context. We cannot delay here. */ -void sas_deform_port(struct asd_sas_phy *phy, int gone) +void sas_deform_port(struct asd_sas_phy *phy, bool gone) { struct sas_ha_struct *sas_ha = phy->ha; struct asd_sas_port *port = phy->port; @@ -301,7 +301,7 @@ void sas_porte_link_reset_err(struct work_struct *work) struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; - sas_deform_port(phy, 1); + sas_deform_port(phy, true); } void sas_porte_timer_event(struct work_struct *work) @@ -309,7 +309,7 @@ void sas_porte_timer_event(struct work_struct *work) struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; - sas_deform_port(phy, 1); + sas_deform_port(phy, true); } void sas_porte_hard_reset(struct work_struct *work) @@ -317,7 +317,7 @@ void sas_porte_hard_reset(struct work_struct *work) struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; - sas_deform_port(phy, 1); + sas_deform_port(phy, true); } /* ---------- SAS port registration ---------- */ @@ -358,8 +358,7 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha) for (i = 0; i < sas_ha->num_phys; i++) if (sas_ha->sas_phy[i]->port) - sas_deform_port(sas_ha->sas_phy[i], 0); - + sas_deform_port(sas_ha->sas_phy[i], false); } const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = { diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 530dddd39bab..f93f8dca65bd 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -264,9 +264,9 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp, ct_free_mp: kfree(mp); ct_exit: - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "6440 Unsol CT: Rsp err %d Data: x%lx\n", - rc, vport->fc_flag); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "6440 Unsol CT: Rsp err %d Data: x%lx\n", + rc, vport->fc_flag); } /** @@ -313,7 +313,7 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq) mi_cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, - "6442 MI Cmd : x%x Not Supported\n", mi_cmd); + "6442 MI Cmd: x%x Not Supported\n", mi_cmd); lpfc_ct_reject_event(ndlp, ct_req, bf_get(wqe_ctxt_tag, &ctiocbq->wqe.xmit_els_rsp.wqe_com), @@ -2229,21 +2229,6 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Look for a retryable error */ if (ulp_status == IOSTAT_LOCAL_REJECT) { switch ((ulp_word4 & IOERR_PARAM_MASK)) { - case IOERR_SLI_ABORTED: - case IOERR_SLI_DOWN: - /* Driver aborted this IO. No retry as error - * is likely Offline->Online or some adapter - * error. Recovery will try again, but if port - * is not active there's no point to continue - * issuing follow up FDMI commands. - */ - if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { - free_ndlp = cmdiocb->ndlp; - lpfc_ct_free_iocb(phba, cmdiocb); - lpfc_nlp_put(free_ndlp); - return; - } - break; case IOERR_ABORT_IN_PROGRESS: case IOERR_SEQUENCE_TIMEOUT: case IOERR_ILLEGAL_FRAME: @@ -2269,6 +2254,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(free_ndlp); + if (ulp_status != IOSTAT_SUCCESS) + return; + ndlp = lpfc_findnode_did(vport, FDMI_DID); if (!ndlp) return; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 3fd1aa5cc78c..2c7d876c64c7 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2007-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -6227,8 +6227,9 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) i++; } lpfc_debugfs_max_slow_ring_trc = (1 << i); - pr_err("lpfc_debugfs_max_disc_trc changed to " - "%d\n", lpfc_debugfs_max_disc_trc); + pr_info("lpfc_debugfs_max_slow_ring_trc " + "changed to %d\n", + lpfc_debugfs_max_slow_ring_trc); } } @@ -6260,7 +6261,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) atomic_set(&phba->nvmeio_trc_cnt, 0); if (lpfc_debugfs_max_nvmeio_trc) { num = lpfc_debugfs_max_nvmeio_trc - 1; - if (num & lpfc_debugfs_max_disc_trc) { + if (num & lpfc_debugfs_max_nvmeio_trc) { /* Change to be a power of 2 */ num = lpfc_debugfs_max_nvmeio_trc; i = 0; @@ -6269,10 +6270,9 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) i++; } lpfc_debugfs_max_nvmeio_trc = (1 << i); - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0575 lpfc_debugfs_max_nvmeio_trc " - "changed to %d\n", - lpfc_debugfs_max_nvmeio_trc); + pr_info("lpfc_debugfs_max_nvmeio_trc changed " + "to %d\n", + lpfc_debugfs_max_nvmeio_trc); } phba->nvmeio_trc_size = lpfc_debugfs_max_nvmeio_trc; @@ -6317,8 +6317,8 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) i++; } lpfc_debugfs_max_disc_trc = (1 << i); - pr_err("lpfc_debugfs_max_disc_trc changed to %d\n", - lpfc_debugfs_max_disc_trc); + pr_info("lpfc_debugfs_max_disc_trc changed to %d\n", + lpfc_debugfs_max_disc_trc); } } diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b1a61eca8295..fca81e0c7c2e 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -7861,6 +7861,13 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) /* Move all affected nodes by pending RSCNs to NPR state. */ list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) { + if (test_bit(FC_UNLOADING, &vport->load_flag)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "1000 %s Unloading set\n", + __func__); + return 0; + } + if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) continue; @@ -8369,9 +8376,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3311 Rcv Flogi PS x%x new PS x%x " - "fc_flag x%lx new fc_flag x%lx\n", + "fc_flag x%lx new fc_flag x%lx, hba_flag x%lx\n", port_state, vport->port_state, - fc_flag, vport->fc_flag); + fc_flag, vport->fc_flag, phba->hba_flag); /* * We temporarily set fc_myDID to make it look like we are diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index b88e54a7e65c..43d246c5c049 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -183,7 +183,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) /* Don't schedule a worker thread event if the vport is going down. 
*/ if (test_bit(FC_UNLOADING, &vport->load_flag) || - !test_bit(HBA_SETUP, &phba->hba_flag)) { + (phba->sli_rev == LPFC_SLI_REV4 && + !test_bit(HBA_SETUP, &phba->hba_flag))) { spin_lock_irqsave(&ndlp->lock, iflags); ndlp->rport = NULL; @@ -1266,6 +1267,10 @@ lpfc_linkdown(struct lpfc_hba *phba) } phba->defer_flogi_acc.flag = false; + /* reinitialize initial HBA flag */ + clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); + clear_bit(HBA_RHBA_CMPL, &phba->hba_flag); + /* Clear external loopback plug detected flag */ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; @@ -1436,10 +1441,6 @@ lpfc_linkup(struct lpfc_hba *phba) phba->pport->rcv_flogi_cnt = 0; spin_unlock_irq(shost->host_lock); - /* reinitialize initial HBA flag */ - clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); - clear_bit(HBA_RHBA_CMPL, &phba->hba_flag); - return 0; } diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 2dedb273b091..bc709786e6af 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -1328,6 +1328,9 @@ struct cq_context { #define LPFC_CQ_CNT_512 0x1 #define LPFC_CQ_CNT_1024 0x2 #define LPFC_CQ_CNT_WORD7 0x3 +#define lpfc_cq_context_cqe_sz_SHIFT 25 +#define lpfc_cq_context_cqe_sz_MASK 0x00000003 +#define lpfc_cq_context_cqe_sz_WORD word0 #define lpfc_cq_context_autovalid_SHIFT 15 #define lpfc_cq_context_autovalid_MASK 0x00000001 #define lpfc_cq_context_autovalid_WORD word0 @@ -1383,9 +1386,9 @@ struct lpfc_mbx_cq_create_set { #define lpfc_mbx_cq_create_set_valid_SHIFT 29 #define lpfc_mbx_cq_create_set_valid_MASK 0x00000001 #define lpfc_mbx_cq_create_set_valid_WORD word1 -#define lpfc_mbx_cq_create_set_cqe_cnt_SHIFT 27 -#define lpfc_mbx_cq_create_set_cqe_cnt_MASK 0x00000003 -#define lpfc_mbx_cq_create_set_cqe_cnt_WORD word1 +#define lpfc_mbx_cq_create_set_cqecnt_SHIFT 27 +#define lpfc_mbx_cq_create_set_cqecnt_MASK 0x00000003 +#define lpfc_mbx_cq_create_set_cqecnt_WORD word1 #define lpfc_mbx_cq_create_set_cqe_size_SHIFT 25 #define lpfc_mbx_cq_create_set_cqe_size_MASK 0x00000003 #define lpfc_mbx_cq_create_set_cqe_size_WORD word1 @@ -1398,13 +1401,16 @@ struct lpfc_mbx_cq_create_set { #define lpfc_mbx_cq_create_set_clswm_SHIFT 12 #define lpfc_mbx_cq_create_set_clswm_MASK 0x00000003 #define lpfc_mbx_cq_create_set_clswm_WORD word1 +#define lpfc_mbx_cq_create_set_cqe_cnt_hi_SHIFT 0 +#define lpfc_mbx_cq_create_set_cqe_cnt_hi_MASK 0x0000001F +#define lpfc_mbx_cq_create_set_cqe_cnt_hi_WORD word1 uint32_t word2; #define lpfc_mbx_cq_create_set_arm_SHIFT 31 #define lpfc_mbx_cq_create_set_arm_MASK 0x00000001 #define lpfc_mbx_cq_create_set_arm_WORD word2 -#define lpfc_mbx_cq_create_set_cq_cnt_SHIFT 16 -#define lpfc_mbx_cq_create_set_cq_cnt_MASK 0x00007FFF -#define lpfc_mbx_cq_create_set_cq_cnt_WORD word2 +#define lpfc_mbx_cq_create_set_cqe_cnt_lo_SHIFT 16 +#define lpfc_mbx_cq_create_set_cqe_cnt_lo_MASK 0x00007FFF +#define lpfc_mbx_cq_create_set_cqe_cnt_lo_WORD word2 #define lpfc_mbx_cq_create_set_num_cq_SHIFT 0 #define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF #define lpfc_mbx_cq_create_set_num_cq_WORD word2 
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 20fa450def03..4081d2a358ee 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2627,27 +2627,33 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_BMID: - m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP1150", "PCI-X2", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_BSMB: m = (typeof(m)){"LP111", "PCI-X2", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZEPHYR: - m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe11000", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZEPHYR_SCSP: - m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe11000", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZEPHYR_DCSP: - m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; + m = (typeof(m)){"LP2105", "PCIe", + "Obsolete, Unsupported FCoE Adapter"}; GE = 1; break; case PCI_DEVICE_ID_ZMID: - m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe1150", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZSMB: - m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe111", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP101: m = (typeof(m)){"LP101", "PCI-X", @@ -2666,22 +2672,28 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT: - m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe12000", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_MID: - m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe1250", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_SMB: - m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe121", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_DCSP: - m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe12002-SP", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_SCSP: - m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe12000-SP", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_S: - m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe12000-S", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PROTEUS_VF: m = (typeof(m)){"LPev12000", "PCIe IOV", @@ -2697,22 +2709,25 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) break; case PCI_DEVICE_ID_TIGERSHARK: oneConnect = 1; - m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; + m = (typeof(m)){"OCe10100", "PCIe", + "Obsolete, Unsupported FCoE Adapter"}; break; case PCI_DEVICE_ID_TOMCAT: oneConnect = 1; - m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; + m = (typeof(m)){"OCe11100", "PCIe", + "Obsolete, Unsupported FCoE Adapter"}; break; case PCI_DEVICE_ID_FALCON: m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", - "EmulexSecure Fibre"}; + "Obsolete, 
Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_BALIUS: m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LANCER_FC: - m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe16000", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LANCER_FC_VF: m = (typeof(m)){"LPe16000", "PCIe", @@ -2720,12 +2735,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) break; case PCI_DEVICE_ID_LANCER_FCOE: oneConnect = 1; - m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; + m = (typeof(m)){"OCe15100", "PCIe", + "Obsolete, Unsupported FCoE Adapter"}; break; case PCI_DEVICE_ID_LANCER_FCOE_VF: oneConnect = 1; m = (typeof(m)){"OCe15100", "PCIe", - "Obsolete, Unsupported FCoE"}; + "Obsolete, Unsupported FCoE Adapter"}; break; case PCI_DEVICE_ID_LANCER_G6_FC: m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; @@ -2739,7 +2755,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) case PCI_DEVICE_ID_SKYHAWK: case PCI_DEVICE_ID_SKYHAWK_VF: oneConnect = 1; - m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; + m = (typeof(m)){"OCe14000", "PCIe", + "Obsolete, Unsupported FCoE Adapter"}; break; default: m = (typeof(m)){"Unknown", "", ""}; @@ -7919,8 +7936,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) int longs; int extra; uint64_t wwn; - u32 if_type; - u32 if_fam; phba->sli4_hba.num_present_cpu = lpfc_present_cpu; phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; @@ -8180,28 +8195,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) */ rc = lpfc_get_sli4_parameters(phba, mboxq); if (rc) { - if_type = bf_get(lpfc_sli_intf_if_type, - &phba->sli4_hba.sli_intf); - if_fam = bf_get(lpfc_sli_intf_sli_family, - &phba->sli4_hba.sli_intf); - if (phba->sli4_hba.extents_in_use && - phba->sli4_hba.rpi_hdrs_in_use) { - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "2999 Unsupported SLI4 Parameters " - "Extents and RPI headers enabled.\n"); - if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && - if_fam == LPFC_SLI_INTF_FAMILY_BE2) { - mempool_free(mboxq, phba->mbox_mem_pool); - rc = -EIO; - goto out_free_bsmbx; - } - } - if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && - if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { - mempool_free(mboxq, phba->mbox_mem_pool); - rc = -EIO; - goto out_free_bsmbx; - } + lpfc_log_msg(phba, KERN_WARNING, LOG_INIT, + "2999 Could not get SLI4 parameters\n"); + rc = -EIO; + mempool_free(mboxq, phba->mbox_mem_pool); + goto out_free_bsmbx; } /* diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 8acb744febcd..508ceeecf2d9 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -390,6 +390,10 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return; + /* may be called before queues established if hba_setup fails */ + if (!phba->sli4_hba.hdwq) + return; + spin_lock_irqsave(&phba->hbalock, iflag); for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; @@ -532,7 +536,8 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); psb->flags &= ~LPFC_SBUF_XBUSY; spin_unlock_irqrestore(&phba->hbalock, iflag); - if (!list_empty(&pring->txq)) + if (test_bit(HBA_SETUP, &phba->hba_flag) && + !list_empty(&pring->txq)) lpfc_worker_wake_up(phba); return; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 47bbcb78fb4d..a8fbdf7119d8 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -5167,7 +5167,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) phba->link_events = 0; phba->pport->fc_myDID = 0; phba->pport->fc_prevDID = 0; - clear_bit(HBA_SETUP, &phba->hba_flag); spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~(LPFC_PROCESS_LA); @@ -5284,6 +5283,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) "0296 Restart HBA Data: x%x x%x\n", phba->pport->port_state, psli->sli_flag); + clear_bit(HBA_SETUP, &phba->hba_flag); lpfc_sli4_queue_unset(phba); rc = lpfc_sli4_brdreset(phba); @@ -16477,10 +16477,10 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, case 4096: if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { - bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + bf_set(lpfc_mbx_cq_create_set_cqe_cnt_lo, &cq_set->u.request, - cq->entry_count); - bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + cq->entry_count); + bf_set(lpfc_mbx_cq_create_set_cqecnt, &cq_set->u.request, LPFC_CQ_CNT_WORD7); break; @@ -16496,15 +16496,15 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, } fallthrough; /* otherwise default to smallest */ case 256: - bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + bf_set(lpfc_mbx_cq_create_set_cqecnt, &cq_set->u.request, LPFC_CQ_CNT_256); break; case 512: - bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + bf_set(lpfc_mbx_cq_create_set_cqecnt, &cq_set->u.request, LPFC_CQ_CNT_512); break; case 1024: - bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + bf_set(lpfc_mbx_cq_create_set_cqecnt, &cq_set->u.request, LPFC_CQ_CNT_1024); break; } diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 9be3da91c923..fd6dab157887 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -575,8 +575,10 @@ struct lpfc_pc_sli4_params { #define LPFC_CQ_4K_PAGE_SZ 0x1 #define LPFC_CQ_16K_PAGE_SZ 0x4 +#define LPFC_CQ_32K_PAGE_SZ 0x8 #define LPFC_WQ_4K_PAGE_SZ 0x1 #define LPFC_WQ_16K_PAGE_SZ 0x4 +#define LPFC_WQ_32K_PAGE_SZ 0x8 struct lpfc_iov { uint32_t pf_number; diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 749688aa8a82..9ee3a3a4ec4d 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.4.0.9" +#define LPFC_DRIVER_VERSION "14.4.0.10" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 3aac0e17cb00..9179f8aee964 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5910,7 +5910,11 @@ megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance) const struct cpumask *mask; if (instance->perf_mode == MR_BALANCED_PERF_MODE) { - mask = cpumask_of_node(dev_to_node(&instance->pdev->dev)); + int nid = dev_to_node(&instance->pdev->dev); + + if (nid == NUMA_NO_NODE) + nid = 0; + mask = cpumask_of_node(nid); for (i = 0; i < instance->low_latency_index_start; i++) { irq = pci_irq_vector(instance->pdev, i); diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index 9bbc7cb98ca3..8d4ef49e04d1 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -56,8 +56,8 @@ extern struct list_head mrioc_list; extern int prot_mask; extern atomic64_t event_counter; -#define MPI3MR_DRIVER_VERSION "8.13.0.5.50" -#define MPI3MR_DRIVER_RELDATE "20-February-2025" +#define MPI3MR_DRIVER_VERSION "8.14.0.5.50" +#define MPI3MR_DRIVER_RELDATE "27-June-2025" #define MPI3MR_DRIVER_NAME "mpi3mr" #define MPI3MR_DRIVER_LICENSE "GPL" @@ -1137,6 +1137,8 @@ struct scmd_priv { * @logdata_buf: Circular buffer to store log data entries * @logdata_buf_idx: Index of entry in buffer to store * @logdata_entry_sz: log data entry size + * @adm_req_q_bar_writeq_lock: Admin request queue lock + * @adm_reply_q_bar_writeq_lock: Admin reply queue lock * @pend_large_data_sz: Counter to track pending large data * @io_throttle_data_length: I/O size to track in 512b blocks * @io_throttle_high: I/O size to start throttle in 512b blocks @@ -1185,7 +1187,7 @@ struct mpi3mr_ioc { char name[MPI3MR_NAME_LENGTH]; char driver_name[MPI3MR_NAME_LENGTH]; - volatile struct mpi3_sysif_registers __iomem *sysif_regs; + struct mpi3_sysif_registers __iomem *sysif_regs; resource_size_t sysif_regs_phys; int bars; u64 dma_mask; @@ -1339,6 +1341,8 @@ struct mpi3mr_ioc { u8 *logdata_buf; u16 logdata_buf_idx; u16 logdata_entry_sz; + spinlock_t adm_req_q_bar_writeq_lock; + spinlock_t adm_reply_q_bar_writeq_lock; atomic_t pend_large_data_sz; u32 io_throttle_data_length; diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c index f36663613950..0e5478d62580 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_app.c +++ b/drivers/scsi/mpi3mr/mpi3mr_app.c @@ -795,9 +795,8 @@ void mpi3mr_release_diag_bufs(struct mpi3mr_ioc *mrioc, u8 skip_rel_action) * * @hdb: HDB pointer * @type: Trigger type - * @data: Trigger data - * @force: Trigger overwrite flag * @trigger_data: Pointer to trigger data information + * @force: Trigger overwrite flag * * Updates trigger type and trigger data based on parameter * passed to 
this function @@ -822,9 +821,8 @@ void mpi3mr_set_trigger_data_in_hdb(struct diag_buffer_desc *hdb, * * @mrioc: Adapter instance reference * @type: Trigger type - * @data: Trigger data - * @force: Trigger overwrite flag * @trigger_data: Pointer to trigger data information + * @force: Trigger overwrite flag * * Updates trigger type and trigger data based on parameter * passed to this function @@ -3388,6 +3386,8 @@ static DEVICE_ATTR_RO(persistent_id); * @buf: the buffer returned * * A sysfs 'read-only' sdev attribute, only works with SATA devices + * + * Returns: the number of characters written to @buf */ static ssize_t sas_ncq_prio_supported_show(struct device *dev, @@ -3406,6 +3406,8 @@ static DEVICE_ATTR_RO(sas_ncq_prio_supported); * @buf: the buffer returned * * A sysfs 'read/write' sdev attribute, only works with SATA devices + * + * Returns: the number of characters written to @buf */ static ssize_t sas_ncq_prio_enable_show(struct device *dev, diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 1d7901a8f0e4..0152d31d430a 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -23,17 +23,22 @@ module_param(poll_queues, int, 0444); MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)"); #if defined(writeq) && defined(CONFIG_64BIT) -static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) +static inline void mpi3mr_writeq(__u64 b, void __iomem *addr, + spinlock_t *write_queue_lock) { writeq(b, addr); } #else -static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) +static inline void mpi3mr_writeq(__u64 b, void __iomem *addr, + spinlock_t *write_queue_lock) { __u64 data_out = b; + unsigned long flags; + spin_lock_irqsave(write_queue_lock, flags); writel((u32)(data_out), addr); writel((u32)(data_out >> 32), (addr + 4)); + spin_unlock_irqrestore(write_queue_lock, flags); } #endif @@ -428,8 +433,8 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, MPI3MR_SENSE_BUF_SZ); } if (cmdptr->is_waiting) { - complete(&cmdptr->done); cmdptr->is_waiting = 0; + complete(&cmdptr->done); } else if (cmdptr->callback) cmdptr->callback(mrioc, cmdptr); } @@ -2954,9 +2959,11 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc) (mrioc->num_admin_req); writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries); mpi3mr_writeq(mrioc->admin_req_dma, - &mrioc->sysif_regs->admin_request_queue_address); + &mrioc->sysif_regs->admin_request_queue_address, + &mrioc->adm_req_q_bar_writeq_lock); mpi3mr_writeq(mrioc->admin_reply_dma, - &mrioc->sysif_regs->admin_reply_queue_address); + &mrioc->sysif_regs->admin_reply_queue_address, + &mrioc->adm_reply_q_bar_writeq_lock); writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); return retval; diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index ce444efd859e..e467b56949e9 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -49,6 +49,13 @@ static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, #define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH (0xFFFE) +/* + * SAS Log info code for a NCQ collateral abort after an NCQ error: + * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR + * See: drivers/message/fusion/lsi/mpi_log_sas.h + */ +#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR 0x31080000 + /** * mpi3mr_host_tag_for_scmd - 
Get host tag for a scmd * @mrioc: Adapter instance reference @@ -3430,7 +3437,18 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, scmd->result = DID_NO_CONNECT << 16; break; case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: - scmd->result = DID_SOFT_ERROR << 16; + if (ioc_loginfo == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) { + /* + * This is a ATA NCQ command aborted due to another NCQ + * command failure. We must retry this command + * immediately but without incrementing its retry + * counter. + */ + WARN_ON_ONCE(xfer_count != 0); + scmd->result = DID_IMM_RETRY << 16; + } else { + scmd->result = DID_SOFT_ERROR << 16; + } break; case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: @@ -5365,6 +5383,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&mrioc->tgtdev_lock); spin_lock_init(&mrioc->watchdog_lock); spin_lock_init(&mrioc->chain_buf_lock); + spin_lock_init(&mrioc->adm_req_q_bar_writeq_lock); + spin_lock_init(&mrioc->adm_reply_q_bar_writeq_lock); spin_lock_init(&mrioc->sas_node_lock); spin_lock_init(&mrioc->trigger_lock); diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 02fc204b9bf7..3b951589feeb 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -2914,7 +2914,6 @@ int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command) { struct MPT3SAS_ADAPTER *ioc; MPI2RequestHeader_t *mpi_request = NULL, *request; - MPI2DefaultReply_t *mpi_reply; Mpi26MctpPassthroughRequest_t *mctp_passthru_req; u16 smid; unsigned long timeout; @@ -3022,8 +3021,6 @@ int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command) goto issue_host_reset; } - mpi_reply = ioc->ctl_cmds.reply; - /* copy out xdata to user */ if (data_in_sz) memcpy(command->data_in_buf_ptr, data_in, data_in_sz); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 508861e88d9f..967af259118e 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -195,6 +195,14 @@ struct sense_info { #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD) #define MPT3SAS_ABRT_TASK_SET (0xFFFE) #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) + +/* + * SAS Log info code for a NCQ collateral abort after an NCQ error: + * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR + * See: drivers/message/fusion/lsi/mpi_log_sas.h + */ +#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR 0x31080000 + /** * struct fw_event_work - firmware event struct * @list: link list framework @@ -5814,6 +5822,17 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) scmd->result = DID_TRANSPORT_DISRUPTED << 16; goto out; } + if (log_info == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) { + /* + * This is a ATA NCQ command aborted due to another NCQ + * command failure. We must retry this command + * immediately but without incrementing its retry + * counter. 
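Both the mpi3mr and mpt3sas hunks above translate the "NCQ collateral abort" log-info code into DID_IMM_RETRY, which requeues the command without consuming scmd->retries. A condensed sketch of that completion-path decision, assuming the log-info value from the patch and a simplified handler:

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>

    #define LOGINFO_SATA_NCQ_FAIL_AFTER_ERR 0x31080000

    static void handle_ioc_terminated(struct scsi_cmnd *scmd, u32 log_info)
    {
            if (log_info == LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) {
                    /*
                     * The command was aborted only because a sibling NCQ
                     * command failed: retry it immediately, without counting
                     * the retry against scmd->retries.
                     */
                    scmd->result = DID_IMM_RETRY << 16;
                    return;
            }
            /* Genuine terminations are still surfaced as soft errors. */
            scmd->result = DID_SOFT_ERROR << 16;
    }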
+ */ + WARN_ON_ONCE(xfer_cnt != 0); + scmd->result = DID_IMM_RETRY << 16; + break; + } if (log_info == 0x31110630) { if (scmd->retries > 2) { scmd->result = DID_NO_CONNECT << 16; @@ -10790,8 +10809,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) break; case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: _scsih_pcie_topology_change_event(ioc, fw_event); - ioc->current_event = NULL; - return; + break; } out: fw_event_work_put(fw_event); diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 6c46654b9cd9..15b3d9d55a4b 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -818,7 +818,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc); if (!sas_protocol_ata(task->task_proto)) if (n_elem) - dma_unmap_sg(mvi->dev, task->scatter, n_elem, + dma_unmap_sg(mvi->dev, task->scatter, task->num_scatter, task->data_dir); prep_out: return rc; @@ -864,7 +864,7 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, if (!sas_protocol_ata(task->task_proto)) if (slot->n_elem) dma_unmap_sg(mvi->dev, task->scatter, - slot->n_elem, task->data_dir); + task->num_scatter, task->data_dir); switch (task->task_proto) { case SAS_PROTOCOL_SMP: diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 315f6a7523f0..334485bb2c12 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -170,6 +170,14 @@ struct forensic_data { #define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO 0x80 #define MAIN_MERRDCTO_MERRDCES 0xA0/* DWORD 0x28) */ +/** + * enum fatal_error_reporter: Indicates the originator of the fatal error + */ +enum fatal_error_reporter { + REPORTER_DRIVER, + REPORTER_FIRMWARE, +}; + struct pm8001_dispatch { char *name; int (*chip_init)(struct pm8001_hba_info *pm8001_ha); @@ -715,6 +723,8 @@ ssize_t pm80xx_get_non_fatal_dump(struct device *cdev, struct device_attribute *attr, char *buf); ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf); int pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha); +void pm80xx_fatal_error_uevent_emit(struct pm8001_hba_info *pm8001_ha, + enum fatal_error_reporter error_reporter); void pm8001_free_dev(struct pm8001_device *pm8001_dev); /* ctl shared API */ extern const struct attribute_group *pm8001_host_groups[]; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 5b373c53c036..c1bae995a412 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -1551,6 +1551,52 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) return 0; } +/** + * pm80xx_fatal_error_uevent_emit - emits a single fatal error uevent + * @pm8001_ha: our hba card information + * @error_reporter: reporter of fatal error + */ +void pm80xx_fatal_error_uevent_emit(struct pm8001_hba_info *pm8001_ha, + enum fatal_error_reporter error_reporter) +{ + struct kobj_uevent_env *env; + + pm8001_dbg(pm8001_ha, FAIL, "emitting fatal error uevent"); + + env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); + if (!env) + return; + + if (add_uevent_var(env, "DRIVER=%s", DRV_NAME)) + goto exit; + + if (add_uevent_var(env, "HBA_NUM=%u", pm8001_ha->id)) + goto exit; + + if (add_uevent_var(env, "EVENT_TYPE=FATAL_ERROR")) + goto exit; + + switch (error_reporter) { + case REPORTER_DRIVER: + if (add_uevent_var(env, "REPORTED_BY=DRIVER")) + goto exit; + break; + case REPORTER_FIRMWARE: + if 
(add_uevent_var(env, "REPORTED_BY=FIRMWARE")) + goto exit; + break; + default: + if (add_uevent_var(env, "REPORTED_BY=OTHER")) + goto exit; + break; + } + + kobject_uevent_env(&pm8001_ha->shost->shost_dev.kobj, KOBJ_CHANGE, env->envp); + +exit: + kfree(env); +} + /** * pm80xx_fatal_errors - returns non-zero *ONLY* when fatal errors * @pm8001_ha: our hba card information @@ -1580,6 +1626,7 @@ pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha) "Fatal error SCRATCHPAD1 = 0x%x SCRATCHPAD2 = 0x%x SCRATCHPAD3 = 0x%x SCRATCHPAD_RSVD0 = 0x%x SCRATCHPAD_RSVD1 = 0x%x\n", scratch_pad1, scratch_pad2, scratch_pad3, scratch_pad_rsvd0, scratch_pad_rsvd1); + pm80xx_fatal_error_uevent_emit(pm8001_ha, REPORTER_DRIVER); ret = 1; } @@ -4039,6 +4086,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) pm8001_dbg(pm8001_ha, FAIL, "Firmware Fatal error! Regval:0x%x\n", regval); + pm80xx_fatal_error_uevent_emit(pm8001_ha, REPORTER_FIRMWARE); pm8001_handle_event(pm8001_ha, NULL, IO_FATAL_ERROR); print_scratchpad_registers(pm8001_ha); return ret; @@ -4677,8 +4725,12 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE); payload.sas_identify.phy_id = phy_id; - return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload, + ret = pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload, sizeof(payload), 0); + if (ret < 0) + pm8001_tag_free(pm8001_ha, tag); + + return ret; } /** @@ -4704,8 +4756,12 @@ static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(tag); payload.phy_id = cpu_to_le32(phy_id); - return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload, + ret = pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload, sizeof(payload), 0); + if (ret < 0) + pm8001_tag_free(pm8001_ha, tag); + + return ret; } /* diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index a8b4314bfd6e..6946d7155bc2 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -25,11 +25,7 @@ config SCSI_QLA_FC Upon request, the driver caches the firmware image until the driver is unloaded. - Firmware images can be retrieved from: - - http://ldriver.qlogic.com/firmware/ - - They are also included in the linux-firmware tree as well. + Firmware images are included in the linux-firmware tree. 
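The pm80xx phy start/stop fixes above stop leaking the command tag when queuing the MPI command fails. The general shape of that error-path cleanup, with hypothetical helper names standing in for the pm8001 ones:

    static int send_phy_cmd(struct my_hba *hba, u32 phy_id)
    {
            u32 tag;
            int ret;

            ret = my_tag_alloc(hba, &tag);                  /* hypothetical */
            if (ret)
                    return ret;

            ret = my_build_and_post_cmd(hba, tag, phy_id);  /* hypothetical */
            if (ret < 0)
                    my_tag_free(hba, tag);  /* don't leak the tag on failure */

            return ret;
    }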
config TCM_QLA2XXX tristate "TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs" diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 08273520c777..43970caca7b3 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -179,10 +179,9 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) struct qla_hw_data *ha = vha->hw; struct gid_list_info *gid_list; dma_addr_t gid_list_dma; - fc_port_t fc_port; char *id_iter; int rc, i; - uint16_t entries, loop_id; + uint16_t entries; seq_printf(s, "%s\n", vha->host_str); gid_list = dma_alloc_coherent(&ha->pdev->dev, @@ -205,18 +204,11 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) seq_puts(s, "Port Name Port ID Loop ID\n"); for (i = 0; i < entries; i++) { - struct gid_list_info *gid = - (struct gid_list_info *)id_iter; - loop_id = le16_to_cpu(gid->loop_id); - memset(&fc_port, 0, sizeof(fc_port_t)); - - fc_port.loop_id = loop_id; - - rc = qla24xx_gpdb_wait(vha, &fc_port, 0); - seq_printf(s, "%8phC %02x%02x%02x %d\n", - fc_port.port_name, fc_port.d_id.b.domain, - fc_port.d_id.b.area, fc_port.d_id.b.al_pa, - fc_port.loop_id); + struct gid_list_info *gid = (struct gid_list_info *)id_iter; + + rc = qla24xx_print_fc_port_id(vha, s, le16_to_cpu(gid->loop_id)); + if (rc != QLA_SUCCESS) + break; id_iter += ha->gid_list_info_size; } out_free_id_list: diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 03e50e8fc08d..145defc420f2 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -557,6 +557,7 @@ qla26xx_dport_diagnostics_v2(scsi_qla_host_t *, int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *); int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8); +int qla24xx_print_fc_port_id(struct scsi_qla_host *, struct seq_file *, u16); int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t, uint16_t *); int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *, diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 514934dd6f80..be211ff22acb 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -8603,8 +8603,6 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, return QLA_SUCCESS; } -#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/" - int qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { @@ -8622,8 +8620,6 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) if (!blob) { ql_log(ql_log_info, vha, 0x0083, "Firmware image unavailable.\n"); - ql_log(ql_log_info, vha, 0x0084, - "Firmware images can be retrieved from: "QLA_FW_URL ".\n"); return QLA_FUNCTION_FAILED; } diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 0cd6f3e14882..32eb0ce8b170 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2147,7 +2147,7 @@ qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, sizeof(*pdb), DMA_FROM_DEVICE); - if (!pdb_dma) { + if (dma_mapping_error(&vha->hw->pdev->dev, pdb_dma)) { ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); return QLA_MEMORY_ALLOC_FAILED; } @@ -6597,6 +6597,54 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) return rval; } +int qla24xx_print_fc_port_id(struct scsi_qla_host *vha, struct seq_file *s, u16 loop_id) +{ + int rval = QLA_FUNCTION_FAILED; + dma_addr_t pd_dma; + struct 
port_database_24xx *pd; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + + if (!vha->hw->flags.fw_started) + goto done; + + pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); + if (pd == NULL) { + ql_log(ql_log_warn, vha, 0xd047, + "Failed to allocate port database structure.\n"); + goto done; + } + + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_PORT_DATABASE; + mc.mb[1] = loop_id; + mc.mb[2] = MSW(pd_dma); + mc.mb[3] = LSW(pd_dma); + mc.mb[6] = MSW(MSD(pd_dma)); + mc.mb[7] = LSW(MSD(pd_dma)); + mc.mb[9] = vha->vp_idx; + + rval = qla24xx_send_mb_cmd(vha, &mc); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1193, "%s: fail\n", __func__); + goto done_free_sp; + } + + ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n", + __func__, pd->port_name); + + seq_printf(s, "%8phC %02x%02x%02x %d\n", + pd->port_name, pd->port_id[0], + pd->port_id[1], pd->port_id[2], + loop_id); + +done_free_sp: + if (pd) + dma_pool_free(ha->s_dma_pool, pd, pd_dma); +done: + return rval; +} + /* * qla24xx_gpdb_wait * NOTE: Do not call this routine from DPC thread diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index d4141656b204..a39f1da4ce47 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -3420,6 +3420,8 @@ static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode) task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, task->data_count, DMA_TO_DEVICE); + if (dma_mapping_error(&ha->pdev->dev, task_data->data_dma)) + return -ENOMEM; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 518a252eb6aa..9a0f467264b3 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -242,9 +242,11 @@ EXPORT_SYMBOL(scsi_change_queue_depth); * specific SCSI device to determine if and when there is a * need to adjust the queue depth on the device. * - * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth, - * -1 - Drop back to untagged operation using host->cmd_per_lun - * as the untagged command depth + * Returns: + * * 0 - No change needed + * * >0 - Adjust queue depth to this new depth, + * * -1 - Drop back to untagged operation using host->cmd_per_lun as the + * untagged command depth * * Lock Status: None held on entry * @@ -708,20 +710,15 @@ void scsi_cdl_check(struct scsi_device *sdev) int scsi_cdl_enable(struct scsi_device *sdev, bool enable) { char buf[64]; - bool is_ata; int ret; if (!sdev->cdl_supported) return -EOPNOTSUPP; - rcu_read_lock(); - is_ata = rcu_dereference(sdev->vpd_pg89); - rcu_read_unlock(); - /* * For ATA devices, CDL needs to be enabled with a SET FEATURES command. 
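The qla2xxx and qla4xxx hunks above replace a bare NULL/zero test on the returned dma_addr_t with dma_mapping_error(), the only portable way to detect a failed streaming mapping. A minimal sketch of the pattern (device and buffer are placeholders):

    #include <linux/dma-mapping.h>

    static int map_one_buffer(struct device *dev, void *buf, size_t len,
                              dma_addr_t *out)
    {
            dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            /* 0 can be a valid DMA address; always use dma_mapping_error(). */
            if (dma_mapping_error(dev, addr))
                    return -ENOMEM;

            *out = addr;
            return 0;
    }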
*/ - if (is_ata) { + if (sdev->is_ata) { struct scsi_mode_data data; struct scsi_sense_hdr sshdr; char *buf_data; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index aef33d1e346a..353cb60e1abe 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -2674,8 +2674,10 @@ static int resp_rsup_tmfs(struct scsi_cmnd *scp, static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target) { /* Read-Write Error Recovery page for mode_sense */ - unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, - 5, 0, 0xff, 0xff}; + static const unsigned char err_recov_pg[] = { + 0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, + 5, 0, 0xff, 0xff + }; memcpy(p, err_recov_pg, sizeof(err_recov_pg)); if (1 == pcontrol) @@ -2685,8 +2687,10 @@ static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target) static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target) { /* Disconnect-Reconnect page for mode_sense */ - unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0}; + static const unsigned char disconnect_pg[] = { + 0x2, 0xe, 128, 128, 0, 10, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 + }; memcpy(p, disconnect_pg, sizeof(disconnect_pg)); if (1 == pcontrol) @@ -2696,9 +2700,11 @@ static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target) static int resp_format_pg(unsigned char *p, int pcontrol, int target) { /* Format device page for mode_sense */ - unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0x40, 0, 0, 0}; + static const unsigned char format_pg[] = { + 0x3, 0x16, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0x40, 0, 0, 0 + }; memcpy(p, format_pg, sizeof(format_pg)); put_unaligned_be16(sdebug_sectors_per, p + 10); @@ -2716,10 +2722,14 @@ static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, static int resp_caching_pg(unsigned char *p, int pcontrol, int target) { /* Caching page for mode_sense */ - unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, - 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0}; + static const unsigned char ch_caching_pg[] = { + /* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + static const unsigned char d_caching_pg[] = { + 0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, + 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0 + }; if (SDEBUG_OPT_N_WCE & sdebug_opts) caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */ @@ -2738,8 +2748,10 @@ static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target) { /* Control mode page for mode_sense */ unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, - 0, 0, 0x2, 0x4b}; + static const unsigned char d_ctrl_m_pg[] = { + 0xa, 10, 2, 0, 0, 0, 0, 0, + 0, 0, 0x2, 0x4b + }; if (sdebug_dsense) ctrl_m_pg[2] |= 0x4; @@ -2794,10 +2806,14 @@ static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target) static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target) { /* Informational Exceptions control mode page for mode_sense */ - unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0, - 0, 0, 0x0, 0x0}; - unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, - 0, 0, 0x0, 0x0}; + static const unsigned char ch_iec_m_pg[] = { + /* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0, + 0, 0, 0x0, 0x0 + }; + 
static const unsigned char d_iec_m_pg[] = { + 0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, + 0, 0, 0x0, 0x0 + }; memcpy(p, iec_m_pg, sizeof(iec_m_pg)); if (1 == pcontrol) @@ -2809,8 +2825,9 @@ static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target) static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target) { /* SAS SSP mode page - short format for mode_sense */ - unsigned char sas_sf_m_pg[] = {0x19, 0x6, - 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0}; + static const unsigned char sas_sf_m_pg[] = { + 0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0 + }; memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg)); if (1 == pcontrol) @@ -2854,9 +2871,10 @@ static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target, static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol) { /* SAS SSP shared protocol specific port mode subpage */ - unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - }; + static const unsigned char sas_sha_m_pg[] = { + 0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + }; memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg)); if (1 == pcontrol) @@ -2923,8 +2941,10 @@ static int process_medium_part_m_pg(struct sdebug_dev_info *devip, static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target, unsigned char dce) { /* Compression page for mode_sense (tape) */ - unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 00, 00}; + static const unsigned char compression_pg[] = { + 0x0f, 14, 0x40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }; memcpy(p, compression_pg, sizeof(compression_pg)); if (dce) @@ -3282,9 +3302,10 @@ static int resp_mode_select(struct scsi_cmnd *scp, static int resp_temp_l_pg(unsigned char *arr) { - unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38, - 0x0, 0x1, 0x3, 0x2, 0x0, 65, - }; + static const unsigned char temp_l_pg[] = { + 0x0, 0x0, 0x3, 0x2, 0x0, 38, + 0x0, 0x1, 0x3, 0x2, 0x0, 65, + }; memcpy(arr, temp_l_pg, sizeof(temp_l_pg)); return sizeof(temp_l_pg); @@ -3292,8 +3313,9 @@ static int resp_temp_l_pg(unsigned char *arr) static int resp_ie_l_pg(unsigned char *arr) { - unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38, - }; + static const unsigned char ie_l_pg[] = { + 0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38, + }; memcpy(arr, ie_l_pg, sizeof(ie_l_pg)); if (iec_m_pg[2] & 0x4) { /* TEST bit set */ @@ -3305,11 +3327,12 @@ static int resp_ie_l_pg(unsigned char *arr) static int resp_env_rep_l_spg(unsigned char *arr) { - unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8, - 0x0, 40, 72, 0xff, 45, 18, 0, 0, - 0x1, 0x0, 0x23, 0x8, - 0x0, 55, 72, 35, 55, 45, 0, 0, - }; + static const unsigned char env_rep_l_spg[] = { + 0x0, 0x0, 0x23, 0x8, + 0x0, 40, 72, 0xff, 45, 18, 0, 0, + 0x1, 0x0, 0x23, 0x8, + 0x0, 55, 72, 35, 55, 45, 0, 0, + }; memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg)); return sizeof(env_rep_l_spg); @@ -8770,7 +8793,7 @@ static int sdebug_add_store(void) dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple); sip->dif_storep = vmalloc(dif_size); - pr_info("dif_storep %u bytes @ %pK\n", dif_size, + pr_info("dif_storep %u bytes @ %p\n", dif_size, sip->dif_storep); if (!sip->dif_storep) { diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 8ff679e67bbf..78346b2b69c9 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -269,17 +269,12 @@ static struct { static struct scsi_dev_info_list_table *scsi_devinfo_lookup_by_key(int key) { struct scsi_dev_info_list_table *devinfo_table; - int found = 0; 
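The scsi_debug conversions above make the mode-page templates static const, so each call memcpy()s from read-only data instead of rebuilding the array on the stack on every invocation. A small illustration of the same idea, reusing the error-recovery page bytes from the patch:

    #include <string.h>

    static const unsigned char err_recov_pg_tmpl[] = {
            0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff
    };

    static int fill_err_recov_pg(unsigned char *p)
    {
            /* One copy from .rodata; no per-call stack initialisation. */
            memcpy(p, err_recov_pg_tmpl, sizeof(err_recov_pg_tmpl));
            return sizeof(err_recov_pg_tmpl);
    }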
list_for_each_entry(devinfo_table, &scsi_dev_info_list, node) - if (devinfo_table->key == key) { - found = 1; - break; - } - if (!found) - return ERR_PTR(-EINVAL); + if (devinfo_table->key == key) + return devinfo_table; - return devinfo_table; + return ERR_PTR(-EINVAL); } /* diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 144c72f0737a..0c65ecfedfbd 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1843,7 +1843,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, * a function to initialize that data. */ if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv) - memset(cmd + 1, 0, shost->hostt->cmd_size); + memset(scsi_cmd_priv(cmd), 0, shost->hostt->cmd_size); if (!(req->rq_flags & RQF_DONTPREP)) { ret = scsi_prepare_cmd(req); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 4833b8fe251b..3c6e089e80c3 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -909,7 +909,8 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, sdev->model = (char *) (sdev->inquiry + 16); sdev->rev = (char *) (sdev->inquiry + 32); - if (strncmp(sdev->vendor, "ATA ", 8) == 0) { + sdev->is_ata = strncmp(sdev->vendor, "ATA ", 8) == 0; + if (sdev->is_ata) { /* * sata emulation layer device. This is a hack to work around * the SATL power management specifications which state that @@ -1899,7 +1900,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, return 0; } - +EXPORT_SYMBOL(scsi_scan_host_selected); static void scsi_sysfs_add_devices(struct Scsi_Host *shost) { struct scsi_device *sdev; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index d772258e29ad..e6464b998960 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -265,7 +265,7 @@ show_shost_supported_mode(struct device *dev, struct device_attribute *attr, return show_shost_mode(supported_mode, buf); } -static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL); +static DEVICE_ATTR(supported_mode, S_IRUGO, show_shost_supported_mode, NULL); static ssize_t show_shost_active_mode(struct device *dev, @@ -279,7 +279,7 @@ show_shost_active_mode(struct device *dev, return show_shost_mode(shost->active_mode, buf); } -static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); +static DEVICE_ATTR(active_mode, S_IRUGO, show_shost_active_mode, NULL); static int check_reset_type(const char *str) { diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 6b165a3ec6de..3a821afee9bc 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -446,13 +446,6 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, return -ENOMEM; fc_host->dev_loss_tmo = fc_dev_loss_tmo; - fc_host->devloss_work_q = alloc_workqueue("fc_dl_%d", 0, 0, - shost->host_no); - if (!fc_host->devloss_work_q) { - destroy_workqueue(fc_host->work_q); - fc_host->work_q = NULL; - return -ENOMEM; - } fc_bsg_hostadd(shost, fc_host); /* ignore any bsg add error - we just can't do sgio */ @@ -2814,6 +2807,7 @@ fc_flush_work(struct Scsi_Host *shost) /** * fc_queue_devloss_work - Schedule work for the fc_host devloss workqueue. * @shost: Pointer to Scsi_Host bound to fc_host. + * @rport: rport associated with the devloss work * @work: Work to queue for execution. 
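The scsi_lib change above replaces the open-coded cmd + 1 with scsi_cmd_priv(), the helper that returns the LLD-private area allocated immediately after struct scsi_cmnd (sized by shost->hostt->cmd_size). Typical driver-side use, with an illustrative private struct:

    #include <linux/string.h>
    #include <scsi/scsi_cmnd.h>

    struct my_cmd_priv {            /* sized via shost->hostt->cmd_size */
            u32 state;
    };

    static void my_init_cmd_priv(struct scsi_cmnd *cmd)
    {
            struct my_cmd_priv *priv = scsi_cmd_priv(cmd);

            memset(priv, 0, sizeof(*priv));
            priv->state = 0;
    }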
* @delay: jiffies to delay the work queuing * @@ -2821,10 +2815,10 @@ fc_flush_work(struct Scsi_Host *shost) * 1 on success / 0 already queued / < 0 for error */ static int -fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work, - unsigned long delay) +fc_queue_devloss_work(struct Scsi_Host *shost, struct fc_rport *rport, + struct delayed_work *work, unsigned long delay) { - if (unlikely(!fc_host_devloss_work_q(shost))) { + if (unlikely(!rport->devloss_work_q)) { printk(KERN_ERR "ERROR: FC host '%s' attempted to queue work, " "when no workqueue created.\n", shost->hostt->name); @@ -2833,17 +2827,18 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work, return -EINVAL; } - return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); + return queue_delayed_work(rport->devloss_work_q, work, delay); } /** * fc_flush_devloss - Flush a fc_host's devloss workqueue. * @shost: Pointer to Scsi_Host bound to fc_host. + * @rport: rport associated with the devloss work */ static void -fc_flush_devloss(struct Scsi_Host *shost) +fc_flush_devloss(struct Scsi_Host *shost, struct fc_rport *rport) { - if (!fc_host_devloss_work_q(shost)) { + if (unlikely(!rport->devloss_work_q)) { printk(KERN_ERR "ERROR: FC host '%s' attempted to flush work, " "when no workqueue created.\n", shost->hostt->name); @@ -2851,7 +2846,7 @@ fc_flush_devloss(struct Scsi_Host *shost) return; } - flush_workqueue(fc_host_devloss_work_q(shost)); + flush_workqueue(rport->devloss_work_q); } @@ -2913,13 +2908,6 @@ fc_remove_host(struct Scsi_Host *shost) fc_host->work_q = NULL; destroy_workqueue(work_q); } - - /* flush all devloss work items, then kill it */ - if (fc_host->devloss_work_q) { - work_q = fc_host->devloss_work_q; - fc_host->devloss_work_q = NULL; - destroy_workqueue(work_q); - } } EXPORT_SYMBOL(fc_remove_host); @@ -2967,6 +2955,7 @@ fc_rport_final_delete(struct work_struct *work) struct device *dev = &rport->dev; struct Scsi_Host *shost = rport_to_shost(rport); struct fc_internal *i = to_fc_internal(shost->transportt); + struct workqueue_struct *work_q; unsigned long flags; int do_callback = 0; @@ -2988,9 +2977,9 @@ fc_rport_final_delete(struct work_struct *work) if (rport->flags & FC_RPORT_DEVLOSS_PENDING) { spin_unlock_irqrestore(shost->host_lock, flags); if (!cancel_delayed_work(&rport->fail_io_work)) - fc_flush_devloss(shost); + fc_flush_devloss(shost, rport); if (!cancel_delayed_work(&rport->dev_loss_work)) - fc_flush_devloss(shost); + fc_flush_devloss(shost, rport); cancel_work_sync(&rport->scan_work); spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; @@ -3021,6 +3010,12 @@ fc_rport_final_delete(struct work_struct *work) fc_bsg_remove(rport->rqst_q); + if (rport->devloss_work_q) { + work_q = rport->devloss_work_q; + rport->devloss_work_q = NULL; + destroy_workqueue(work_q); + } + transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); @@ -3093,6 +3088,22 @@ fc_remote_port_create(struct Scsi_Host *shost, int channel, spin_unlock_irqrestore(shost->host_lock, flags); + rport->devloss_work_q = alloc_workqueue("fc_dl_%d_%d", 0, 0, + shost->host_no, rport->number); + if (!rport->devloss_work_q) { + printk(KERN_ERR "FC Remote Port alloc_workqueue failed\n"); +/* + * Note that we have not yet called device_initialize() / get_device() + * Cannot reclaim incremented rport->number because we released host_lock + */ + spin_lock_irqsave(shost->host_lock, flags); + list_del(&rport->peers); + scsi_host_put(shost); /* for 
fc_host->rport list */ + spin_unlock_irqrestore(shost->host_lock, flags); + kfree(rport); + return NULL; + } + dev = &rport->dev; device_initialize(dev); /* takes self reference */ dev->parent = get_device(&shost->shost_gendev); /* parent reference */ @@ -3255,9 +3266,9 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, * be checked and will NOOP the function. */ if (!cancel_delayed_work(&rport->fail_io_work)) - fc_flush_devloss(shost); + fc_flush_devloss(shost, rport); if (!cancel_delayed_work(&rport->dev_loss_work)) - fc_flush_devloss(shost); + fc_flush_devloss(shost, rport); spin_lock_irqsave(shost->host_lock, flags); @@ -3451,11 +3462,12 @@ fc_remote_port_delete(struct fc_rport *rport) /* see if we need to kill io faster than waiting for device loss */ if ((rport->fast_io_fail_tmo != -1) && (rport->fast_io_fail_tmo < timeout)) - fc_queue_devloss_work(shost, &rport->fail_io_work, - rport->fast_io_fail_tmo * HZ); + fc_queue_devloss_work(shost, rport, &rport->fail_io_work, + rport->fast_io_fail_tmo * HZ); /* cap the length the devices can be blocked until they are deleted */ - fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ); + fc_queue_devloss_work(shost, rport, &rport->dev_loss_work, + timeout * HZ); } EXPORT_SYMBOL(fc_remote_port_delete); @@ -3514,9 +3526,9 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) * transaction. */ if (!cancel_delayed_work(&rport->fail_io_work)) - fc_flush_devloss(shost); + fc_flush_devloss(shost, rport); if (!cancel_delayed_work(&rport->dev_loss_work)) - fc_flush_devloss(shost); + fc_flush_devloss(shost, rport); spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index c75a806496d6..743b4c792ceb 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2143,6 +2143,8 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data) return 0; iscsi_remove_conn(iscsi_dev_to_conn(dev)); + iscsi_put_conn(iscsi_dev_to_conn(dev)); + return 0; } diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 351b028ef893..d69c7c444a31 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -40,6 +40,8 @@ #include #include "scsi_sas_internal.h" +#include "scsi_priv.h" + struct sas_host_attrs { struct list_head rphy_list; struct mutex lock; @@ -1683,32 +1685,66 @@ int scsi_is_sas_rphy(const struct device *dev) } EXPORT_SYMBOL(scsi_is_sas_rphy); - -/* - * SCSI scan helper - */ - -static int sas_user_scan(struct Scsi_Host *shost, uint channel, - uint id, u64 lun) +static void scan_channel_zero(struct Scsi_Host *shost, uint id, u64 lun) { struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); struct sas_rphy *rphy; - mutex_lock(&sas_host->lock); list_for_each_entry(rphy, &sas_host->rphy_list, list) { if (rphy->identify.device_type != SAS_END_DEVICE || rphy->scsi_target_id == -1) continue; - if ((channel == SCAN_WILD_CARD || channel == 0) && - (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { + if (id == SCAN_WILD_CARD || id == rphy->scsi_target_id) { scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, SCSI_SCAN_MANUAL); } } - mutex_unlock(&sas_host->lock); +} - return 0; +/* + * SCSI scan helper + */ + +static int sas_user_scan(struct Scsi_Host *shost, uint channel, + uint id, u64 lun) +{ + struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); + int res = 0; + int i; + + switch 
(channel) { + case 0: + mutex_lock(&sas_host->lock); + scan_channel_zero(shost, id, lun); + mutex_unlock(&sas_host->lock); + break; + + case SCAN_WILD_CARD: + mutex_lock(&sas_host->lock); + scan_channel_zero(shost, id, lun); + mutex_unlock(&sas_host->lock); + + for (i = 1; i <= shost->max_channel; i++) { + res = scsi_scan_host_selected(shost, i, id, lun, + SCSI_SCAN_MANUAL); + if (res) + goto exit_scan; + } + break; + + default: + if (channel < shost->max_channel) { + res = scsi_scan_host_selected(shost, channel, id, lun, + SCSI_SCAN_MANUAL); + } else { + res = -EINVAL; + } + break; + } + +exit_scan: + return res; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 3f6e87705b62..eff434d8d71d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3384,7 +3384,7 @@ static void sd_read_block_limits_ext(struct scsi_disk *sdkp) rcu_read_lock(); vpd = rcu_dereference(sdkp->device->vpd_pgb7); - if (vpd && vpd->len >= 2) + if (vpd && vpd->len >= 6) sdkp->rscs = vpd->data[5] & 1; rcu_read_unlock(); } @@ -3459,19 +3459,14 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) } if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY, 0) < 0) { - struct scsi_vpd *vpd; - sdev->no_report_opcodes = 1; - /* Disable WRITE SAME if REPORT SUPPORTED OPERATION - * CODES is unsupported and the device has an ATA - * Information VPD page (SAT). + /* + * Disable WRITE SAME if REPORT SUPPORTED OPERATION CODES is + * unsupported and this is an ATA device. */ - rcu_read_lock(); - vpd = rcu_dereference(sdev->vpd_pg89); - if (vpd) + if (sdev->is_ata) sdev->no_write_same = 1; - rcu_read_unlock(); } if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16, 0) == 1) @@ -4173,7 +4168,9 @@ static void sd_shutdown(struct device *dev) if ((system_state != SYSTEM_RESTART && sdkp->device->manage_system_start_stop) || (system_state == SYSTEM_POWER_OFF && - sdkp->device->manage_shutdown)) { + sdkp->device->manage_shutdown) || + (system_state == SYSTEM_RUNNING && + sdkp->device->manage_runtime_start_stop)) { sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); sd_start_stop_device(sdkp, 0); } diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 43f47e3aa448..ec7bc6e30228 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -257,11 +257,41 @@ static int iscsi_get_pr_transport_id_len( return len; } -static char *iscsi_parse_pr_out_transport_id( +static void sas_parse_pr_out_transport_id(char *buf, char *i_str) +{ + char hex[17] = {}; + + bin2hex(hex, buf + 4, 8); + snprintf(i_str, TRANSPORT_IQN_LEN, "naa.%s", hex); +} + +static void srp_parse_pr_out_transport_id(char *buf, char *i_str) +{ + char hex[33] = {}; + + bin2hex(hex, buf + 8, 16); + snprintf(i_str, TRANSPORT_IQN_LEN, "0x%s", hex); +} + +static void fcp_parse_pr_out_transport_id(char *buf, char *i_str) +{ + snprintf(i_str, TRANSPORT_IQN_LEN, "%8phC", buf + 8); +} + +static void sbp_parse_pr_out_transport_id(char *buf, char *i_str) +{ + char hex[17] = {}; + + bin2hex(hex, buf + 8, 8); + snprintf(i_str, TRANSPORT_IQN_LEN, "%s", hex); +} + +static bool iscsi_parse_pr_out_transport_id( struct se_portal_group *se_tpg, char *buf, u32 *out_tid_len, - char **port_nexus_ptr) + char **port_nexus_ptr, + char *i_str) { char *p; int i; @@ -282,7 +312,7 @@ static char *iscsi_parse_pr_out_transport_id( if ((format_code != 0x00) && (format_code != 0x40)) { pr_err("Illegal format code: 0x%02x for iSCSI" " Initiator Transport ID\n", format_code); - return 
NULL; + return false; } /* * If the caller wants the TransportID Length, we set that value for the @@ -306,7 +336,7 @@ static char *iscsi_parse_pr_out_transport_id( pr_err("Unable to locate \",i,0x\" separator" " for Initiator port identifier: %s\n", &buf[4]); - return NULL; + return false; } *p = '\0'; /* Terminate iSCSI Name */ p += 5; /* Skip over ",i,0x" separator */ @@ -339,7 +369,8 @@ static char *iscsi_parse_pr_out_transport_id( } else *port_nexus_ptr = NULL; - return &buf[4]; + strscpy(i_str, &buf[4], TRANSPORT_IQN_LEN); + return true; } int target_get_pr_transport_id_len(struct se_node_acl *nacl, @@ -387,33 +418,35 @@ int target_get_pr_transport_id(struct se_node_acl *nacl, } } -const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, - char *buf, u32 *out_tid_len, char **port_nexus_ptr) +bool target_parse_pr_out_transport_id(struct se_portal_group *tpg, + char *buf, u32 *out_tid_len, char **port_nexus_ptr, char *i_str) { - u32 offset; - switch (tpg->proto_id) { case SCSI_PROTOCOL_SAS: /* * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID * for initiator ports using SCSI over SAS Serial SCSI Protocol. */ - offset = 4; + sas_parse_pr_out_transport_id(buf, i_str); break; - case SCSI_PROTOCOL_SBP: case SCSI_PROTOCOL_SRP: + srp_parse_pr_out_transport_id(buf, i_str); + break; case SCSI_PROTOCOL_FCP: - offset = 8; + fcp_parse_pr_out_transport_id(buf, i_str); + break; + case SCSI_PROTOCOL_SBP: + sbp_parse_pr_out_transport_id(buf, i_str); break; case SCSI_PROTOCOL_ISCSI: return iscsi_parse_pr_out_transport_id(tpg, buf, out_tid_len, - port_nexus_ptr); + port_nexus_ptr, i_str); default: pr_err("Unknown proto_id: 0x%02x\n", tpg->proto_id); - return NULL; + return false; } *port_nexus_ptr = NULL; *out_tid_len = 24; - return buf + offset; + return true; } diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 73564efd11d2..66c292b7d74b 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -64,6 +64,7 @@ static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *nam pr_err("Unable to allocate struct iblock_dev\n"); return NULL; } + ib_dev->ibd_exclusive = true; ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug), GFP_KERNEL); @@ -95,6 +96,7 @@ static int iblock_configure_device(struct se_device *dev) struct block_device *bd; struct blk_integrity *bi; blk_mode_t mode = BLK_OPEN_READ; + void *holder = ib_dev; unsigned int max_write_zeroes_sectors; int ret; @@ -109,15 +111,18 @@ static int iblock_configure_device(struct se_device *dev) goto out; } - pr_debug( "IBLOCK: Claiming struct block_device: %s\n", - ib_dev->ibd_udev_path); + pr_debug("IBLOCK: Claiming struct block_device: %s: %d\n", + ib_dev->ibd_udev_path, ib_dev->ibd_exclusive); if (!ib_dev->ibd_readonly) mode |= BLK_OPEN_WRITE; else dev->dev_flags |= DF_READ_ONLY; - bdev_file = bdev_file_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev, + if (!ib_dev->ibd_exclusive) + holder = NULL; + + bdev_file = bdev_file_open_by_path(ib_dev->ibd_udev_path, mode, holder, NULL); if (IS_ERR(bdev_file)) { ret = PTR_ERR(bdev_file); @@ -560,13 +565,14 @@ iblock_execute_write_same(struct se_cmd *cmd) } enum { - Opt_udev_path, Opt_readonly, Opt_force, Opt_err + Opt_udev_path, Opt_readonly, Opt_force, Opt_exclusive, Opt_err, }; static match_table_t tokens = { {Opt_udev_path, "udev_path=%s"}, {Opt_readonly, "readonly=%d"}, {Opt_force, "force=%d"}, + {Opt_exclusive, "exclusive=%d"}, {Opt_err, NULL} }; @@ -576,7 +582,7 @@ 
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev, struct iblock_dev *ib_dev = IBLOCK_DEV(dev); char *orig, *ptr, *arg_p, *opts; substring_t args[MAX_OPT_ARGS]; - int ret = 0, token; + int ret = 0, token, tmp_exclusive; unsigned long tmp_readonly; opts = kstrdup(page, GFP_KERNEL); @@ -623,6 +629,22 @@ static ssize_t iblock_set_configfs_dev_params(struct se_device *dev, ib_dev->ibd_readonly = tmp_readonly; pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly); break; + case Opt_exclusive: + arg_p = match_strdup(&args[0]); + if (!arg_p) { + ret = -ENOMEM; + break; + } + ret = kstrtoint(arg_p, 0, &tmp_exclusive); + kfree(arg_p); + if (ret < 0) { + pr_err("kstrtoul() failed for exclusive=\n"); + goto out; + } + ib_dev->ibd_exclusive = tmp_exclusive; + pr_debug("IBLOCK: exclusive: %d\n", + ib_dev->ibd_exclusive); + break; case Opt_force: break; default: @@ -647,6 +669,7 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b) bl += sprintf(b + bl, " UDEV PATH: %s", ib_dev->ibd_udev_path); bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly); + bl += sprintf(b + bl, " exclusive: %d\n", ib_dev->ibd_exclusive); bl += sprintf(b + bl, " "); if (bd) { diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index 91f6f4280666..e2f28a69a11c 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -34,6 +34,7 @@ struct iblock_dev { struct block_device *ibd_bd; struct file *ibd_bdev_file; bool ibd_readonly; + bool ibd_exclusive; struct iblock_dev_plug *ibd_plug; } ____cacheline_aligned; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 408be26d2e9b..20aab1f50565 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -103,8 +103,8 @@ int target_get_pr_transport_id_len(struct se_node_acl *nacl, int target_get_pr_transport_id(struct se_node_acl *nacl, struct t10_pr_registration *pr_reg, int *format_code, unsigned char *buf); -const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, - char *buf, u32 *out_tid_len, char **port_nexus_ptr); +bool target_parse_pr_out_transport_id(struct se_portal_group *tpg, + char *buf, u32 *out_tid_len, char **port_nexus_ptr, char *i_str); /* target_core_hba.c */ struct se_hba *core_alloc_hba(const char *, u32, u32); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 70905805cb17..83e172c92238 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1478,11 +1478,12 @@ core_scsi3_decode_spec_i_port( LIST_HEAD(tid_dest_list); struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; unsigned char *buf, *ptr, proto_ident; - const unsigned char *i_str = NULL; + unsigned char i_str[TRANSPORT_IQN_LEN]; char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; sense_reason_t ret; u32 tpdl, tid_len = 0; u32 dest_rtpi = 0; + bool tid_found; /* * Allocate a struct pr_transport_id_holder and setup the @@ -1571,9 +1572,9 @@ core_scsi3_decode_spec_i_port( dest_rtpi = tmp_lun->lun_tpg->tpg_rtpi; iport_ptr = NULL; - i_str = target_parse_pr_out_transport_id(tmp_tpg, - ptr, &tid_len, &iport_ptr); - if (!i_str) + tid_found = target_parse_pr_out_transport_id(tmp_tpg, + ptr, &tid_len, &iport_ptr, i_str); + if (!tid_found) continue; /* * Determine if this SCSI device server requires that @@ -3153,13 +3154,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, struct t10_pr_registration 
*pr_reg, *pr_res_holder, *dest_pr_reg; struct t10_reservation *pr_tmpl = &dev->t10_pr; unsigned char *buf; - const unsigned char *initiator_str; + unsigned char initiator_str[TRANSPORT_IQN_LEN]; char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN] = { }; u32 tid_len, tmp_tid_len; int new_reg = 0, type, scope, matching_iname; sense_reason_t ret; unsigned short rtpi; unsigned char proto_ident; + bool tid_found; if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); @@ -3278,9 +3280,9 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, ret = TCM_INVALID_PARAMETER_LIST; goto out; } - initiator_str = target_parse_pr_out_transport_id(dest_se_tpg, - &buf[24], &tmp_tid_len, &iport_ptr); - if (!initiator_str) { + tid_found = target_parse_pr_out_transport_id(dest_se_tpg, + &buf[24], &tmp_tid_len, &iport_ptr, initiator_str); + if (!tid_found) { pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" " initiator_str from Transport ID\n"); ret = TCM_INVALID_PARAMETER_LIST; diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index de8b6acd4058..4bd7d491e3c5 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -87,6 +88,23 @@ static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status) } } +static const char * const ufs_hid_states[] = { + [HID_IDLE] = "idle", + [ANALYSIS_IN_PROGRESS] = "analysis_in_progress", + [DEFRAG_REQUIRED] = "defrag_required", + [DEFRAG_IN_PROGRESS] = "defrag_in_progress", + [DEFRAG_COMPLETED] = "defrag_completed", + [DEFRAG_NOT_REQUIRED] = "defrag_not_required", +}; + +static const char *ufs_hid_state_to_string(enum ufs_hid_state state) +{ + if (state < NUM_UFS_HID_STATES) + return ufs_hid_states[state]; + + return "unknown"; +} + static const char *ufshcd_uic_link_state_to_string( enum uic_link_state state) { @@ -1499,7 +1517,7 @@ static ssize_t _name##_show(struct device *dev, \ ret = -EINVAL; \ goto out; \ } \ - ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \ + ret = sysfs_emit(buf, "%s\n", str_true_false(flag)); \ out: \ up(&hba->host_sem); \ return ret; \ @@ -1763,6 +1781,178 @@ static const struct attribute_group ufs_sysfs_attributes_group = { .attrs = ufs_sysfs_attributes, }; +static int hid_query_attr(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u32 *attr_val) +{ + int ret; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + + ufshcd_rpm_get_sync(hba); + ret = ufshcd_query_attr(hba, opcode, idn, 0, 0, attr_val); + ufshcd_rpm_put_sync(hba); + + up(&hba->host_sem); + return ret; +} + +static ssize_t analysis_trigger_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int mode; + int ret; + + if (sysfs_streq(buf, "enable")) + mode = HID_ANALYSIS_ENABLE; + else if (sysfs_streq(buf, "disable")) + mode = HID_ANALYSIS_AND_DEFRAG_DISABLE; + else + return -EINVAL; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode); + + return ret < 0 ? 
ret : count; +} + +static DEVICE_ATTR_WO(analysis_trigger); + +static ssize_t defrag_trigger_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int mode; + int ret; + + if (sysfs_streq(buf, "enable")) + mode = HID_ANALYSIS_AND_DEFRAG_ENABLE; + else if (sysfs_streq(buf, "disable")) + mode = HID_ANALYSIS_AND_DEFRAG_DISABLE; + else + return -EINVAL; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode); + + return ret < 0 ? ret : count; +} + +static DEVICE_ATTR_WO(defrag_trigger); + +static ssize_t fragmented_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_HID_AVAILABLE_SIZE, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%u\n", value); +} + +static DEVICE_ATTR_RO(fragmented_size); + +static ssize_t defrag_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_HID_SIZE, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%u\n", value); +} + +static ssize_t defrag_size_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + if (kstrtou32(buf, 0, &value)) + return -EINVAL; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_HID_SIZE, &value); + + return ret < 0 ? ret : count; +} + +static DEVICE_ATTR_RW(defrag_size); + +static ssize_t progress_ratio_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_HID_PROGRESS_RATIO, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%u\n", value); +} + +static DEVICE_ATTR_RO(progress_ratio); + +static ssize_t state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_HID_STATE, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%s\n", ufs_hid_state_to_string(value)); +} + +static DEVICE_ATTR_RO(state); + +static struct attribute *ufs_sysfs_hid[] = { + &dev_attr_analysis_trigger.attr, + &dev_attr_defrag_trigger.attr, + &dev_attr_fragmented_size.attr, + &dev_attr_defrag_size.attr, + &dev_attr_progress_ratio.attr, + &dev_attr_state.attr, + NULL, +}; + +static umode_t ufs_sysfs_hid_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct ufs_hba *hba = dev_get_drvdata(dev); + + return hba->dev_info.hid_sup ? 
attr->mode : 0; +} + +static const struct attribute_group ufs_sysfs_hid_group = { + .name = "hid", + .attrs = ufs_sysfs_hid, + .is_visible = ufs_sysfs_hid_is_visible, +}; + static const struct attribute_group *ufs_sysfs_groups[] = { &ufs_sysfs_default_group, &ufs_sysfs_capabilities_group, @@ -1777,6 +1967,7 @@ static const struct attribute_group *ufs_sysfs_groups[] = { &ufs_sysfs_string_descriptors_group, &ufs_sysfs_flags_group, &ufs_sysfs_attributes_group, + &ufs_sysfs_hid_group, NULL, }; @@ -1808,7 +1999,7 @@ UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1); UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8); UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4); UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1); -UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8); +UFS_UNIT_DESC_PARAM(physical_memory_resource_count, _PHY_MEM_RSRC_CNT, 8); UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2); UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1); UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4); @@ -1825,7 +2016,7 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = { &dev_attr_logical_block_count.attr, &dev_attr_erase_block_size.attr, &dev_attr_provisioning_type.attr, - &dev_attr_physical_memory_resourse_count.attr, + &dev_attr_physical_memory_resource_count.attr, &dev_attr_context_capabilities.attr, &dev_attr_large_unit_granularity.attr, &dev_attr_wb_buf_alloc_units.attr, diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index f62d89c8e580..96ad57c3144b 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -364,6 +364,34 @@ void ufshcd_disable_irq(struct ufs_hba *hba) } EXPORT_SYMBOL_GPL(ufshcd_disable_irq); +/** + * ufshcd_enable_intr - enable interrupts + * @hba: per adapter instance + * @intrs: interrupt bits + */ +static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) +{ + u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + u32 new_val = old_val | intrs; + + if (new_val != old_val) + ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE); +} + +/** + * ufshcd_disable_intr - disable interrupts + * @hba: per adapter instance + * @intrs: interrupt bits + */ +static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) +{ + u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + u32 new_val = old_val & ~intrs; + + if (new_val != old_val) + ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE); +} + static void ufshcd_configure_wb(struct ufs_hba *hba) { if (!ufshcd_is_wb_allowed(hba)) @@ -2566,7 +2594,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * @hba: per adapter instance * @uic_cmd: UIC command * - * Return: 0 only if success. + * Return: 0 if successful; < 0 upon failure. 
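The reworked ufshcd_enable_intr()/ufshcd_disable_intr() above only touch REG_INTERRUPT_ENABLE when the mask actually changes, skipping redundant MMIO writes. The read-modify-write-if-changed pattern in isolation (the register pointer is a placeholder):

    #include <linux/io.h>

    static void set_intr_bits(void __iomem *reg, u32 intrs)
    {
            u32 old_val = readl(reg);
            u32 new_val = old_val | intrs;

            /* Skip the posted MMIO write when nothing would change. */
            if (new_val != old_val)
                    writel(new_val, reg);
    }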
*/ static int __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) @@ -2596,6 +2624,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) */ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { + unsigned long flags; int ret; if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) @@ -2605,6 +2634,10 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) mutex_lock(&hba->uic_cmd_mutex); ufshcd_add_delay_before_dme_cmd(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); + spin_unlock_irqrestore(hba->host->host_lock, flags); + ret = __ufshcd_send_uic_cmd(hba, uic_cmd); if (!ret) ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); @@ -2681,32 +2714,6 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) return ufshcd_crypto_fill_prdt(hba, lrbp); } -/** - * ufshcd_enable_intr - enable interrupts - * @hba: per adapter instance - * @intrs: interrupt bits - */ -static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) -{ - u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); - - set |= intrs; - ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); -} - -/** - * ufshcd_disable_intr - disable interrupts - * @hba: per adapter instance - * @intrs: interrupt bits - */ -static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) -{ - u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); - - set &= ~intrs; - ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); -} - /** * ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header according to request * descriptor according to request @@ -2826,8 +2833,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, /* Copy the Descriptor */ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) memcpy(ucd_req_ptr + 1, query->descriptor, len); - - memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) @@ -2840,8 +2845,6 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) .transaction_code = UPIU_TRANSACTION_NOP_OUT, .task_tag = lrbp->task_tag, }; - - memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } /** @@ -2867,6 +2870,8 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba, else ret = -EINVAL; + memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); + return ret; } @@ -3074,6 +3079,9 @@ static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, hba->dev_cmd.type = cmd_type; } +/* + * Return: 0 upon success; < 0 upon failure. + */ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) { @@ -3186,9 +3194,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) break; } + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } +/* + * Return: 0 upon success; < 0 upon failure. + */ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int max_timeout) { @@ -3263,6 +3275,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, } } + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3280,6 +3293,9 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba) ufshcd_release(hba); } +/* + * Return: 0 upon success; < 0 upon failure. 
+ */ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, const u32 tag, int timeout) { @@ -3367,6 +3383,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba, dev_err(hba->dev, "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n", __func__, opcode, idn, ret, retries); + WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } @@ -3378,7 +3395,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba, * @index: flag index to access * @flag_res: the flag value after the query request completes * - * Return: 0 for success, non-zero in case of failure. + * Return: 0 for success; < 0 upon failure. */ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res) @@ -3434,6 +3451,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: ufshcd_dev_man_unlock(hba); + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3446,7 +3464,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, * @selector: selector field * @attr_val: the attribute value after the query request completes * - * Return: 0 for success, non-zero in case of failure. + * Return: 0 upon success; < 0 upon failure. */ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) @@ -3495,6 +3513,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: ufshcd_dev_man_unlock(hba); + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3509,7 +3528,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, * @attr_val: the attribute value after the query request * completes * - * Return: 0 for success, non-zero in case of failure. + * Return: 0 for success; < 0 upon failure. */ int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, @@ -3532,9 +3551,13 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba, dev_err(hba->dev, "%s: query attribute, idn %d, failed with error %d after %d retries\n", __func__, idn, ret, QUERY_REQ_RETRIES); + WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } +/* + * Return: 0 if successful; < 0 upon failure. + */ static int __ufshcd_query_descriptor(struct ufs_hba *hba, enum query_opcode opcode, enum desc_idn idn, u8 index, u8 selector, u8 *desc_buf, int *buf_len) @@ -3592,6 +3615,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, out_unlock: hba->dev_cmd.query.descriptor = NULL; ufshcd_dev_man_unlock(hba); + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3608,7 +3632,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, * The buf_len parameter will contain, on return, the length parameter * received on the response. * - * Return: 0 for success, non-zero in case of failure. + * Return: 0 for success; < 0 upon failure. */ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode, @@ -3626,6 +3650,7 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, break; } + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3638,7 +3663,7 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, * @param_read_buf: pointer to buffer where parameter would be read * @param_size: sizeof(param_read_buf) * - * Return: 0 in case of success, non-zero otherwise. + * Return: 0 in case of success; < 0 upon failure. 
*/ int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, @@ -3705,6 +3730,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, out: if (is_kmalloc) kfree(desc_buf); + WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } @@ -3818,7 +3844,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, * @param_read_buf: pointer to buffer where parameter would be read * @param_size: sizeof(param_read_buf) * - * Return: 0 in case of success, non-zero otherwise. + * Return: 0 in case of success; < 0 upon failure. */ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, int lun, @@ -4253,6 +4279,30 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, } EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); +/** + * ufshcd_dme_rmw - get modify set a DME attribute + * @hba: per adapter instance + * @mask: indicates which bits to clear from the value that has been read + * @val: actual value to write + * @attr: dme attribute + */ +int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask, + u32 val, u32 attr) +{ + u32 cfg = 0; + int err; + + err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &cfg); + if (err) + return err; + + cfg &= ~mask; + cfg |= (val & mask); + + return ufshcd_dme_set(hba, UIC_ARG_MIB(attr), cfg); +} +EXPORT_SYMBOL_GPL(ufshcd_dme_rmw); + /** * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power * state) and waits for it to take effect. @@ -4275,7 +4325,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) unsigned long flags; u8 status; int ret; - bool reenable_intr = false; mutex_lock(&hba->uic_cmd_mutex); ufshcd_add_delay_before_dme_cmd(hba); @@ -4286,15 +4335,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) goto out_unlock; } hba->uic_async_done = &uic_async_done; - if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) { - ufshcd_disable_intr(hba, UIC_COMMAND_COMPL); - /* - * Make sure UIC command completion interrupt is disabled before - * issuing UIC command. - */ - ufshcd_readl(hba, REG_INTERRUPT_ENABLE); - reenable_intr = true; - } + ufshcd_disable_intr(hba, UIC_COMMAND_COMPL); spin_unlock_irqrestore(hba->host->host_lock, flags); ret = __ufshcd_send_uic_cmd(hba, cmd); if (ret) { @@ -4338,9 +4379,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) spin_lock_irqsave(hba->host->host_lock, flags); hba->active_uic_cmd = NULL; hba->uic_async_done = NULL; - if (reenable_intr) - ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); - if (ret) { + if (ret && !hba->pm_op_in_progress) { ufshcd_set_link_broken(hba); ufshcd_schedule_eh_work(hba); } @@ -4348,6 +4387,14 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) spin_unlock_irqrestore(hba->host->host_lock, flags); mutex_unlock(&hba->uic_cmd_mutex); + /* + * If the h8 exit fails during the runtime resume process, it becomes + * stuck and cannot be recovered through the error handler. To fix + * this, use link recovery instead of the error handler. 
+ */ + if (ret && hba->pm_op_in_progress) + ret = ufshcd_link_recovery(hba); + return ret; } @@ -4362,28 +4409,17 @@ int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { int ret; + if (uic_cmd->argument1 != UIC_ARG_MIB(PA_PWRMODE) || + uic_cmd->command != UIC_CMD_DME_SET) + return ufshcd_send_uic_cmd(hba, uic_cmd); + if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) return 0; ufshcd_hold(hba); - - if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) && - uic_cmd->command == UIC_CMD_DME_SET) { - ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd); - goto out; - } - - mutex_lock(&hba->uic_cmd_mutex); - ufshcd_add_delay_before_dme_cmd(hba); - - ret = __ufshcd_send_uic_cmd(hba, uic_cmd); - if (!ret) - ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); - - mutex_unlock(&hba->uic_cmd_mutex); - -out: + ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd); ufshcd_release(hba); + return ret; } @@ -4796,7 +4832,7 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba) * 3. Program UTRL and UTMRL base address * 4. Configure run-stop-registers * - * Return: 0 on success, non-zero value on failure. + * Return: 0 if successful; < 0 upon failure. */ int ufshcd_make_hba_operational(struct ufs_hba *hba) { @@ -7807,7 +7843,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) hba->silence_err_logs = false; /* scale up clocks to max frequency before full reinitialization */ - ufshcd_scale_clks(hba, ULONG_MAX, true); + if (ufshcd_is_clkscaling_supported(hba)) + ufshcd_scale_clks(hba, ULONG_MAX, true); err = ufshcd_hba_enable(hba); @@ -8419,6 +8456,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba) dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP]; + dev_info->hid_sup = get_unaligned_be32(desc_buf + + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP) & + UFS_DEV_HID_SUPPORT; + model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; err = ufshcd_read_string_desc(hba, model_index, diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index 3e545af536e5..f0adcd9dd553 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -1110,8 +1110,8 @@ static int exynos_ufs_post_link(struct ufs_hba *hba) hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE); hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE); - hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); - hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); + hci_writel(ufs, BIT(hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); + hci_writel(ufs, BIT(hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN); if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB) diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index 182f58d0c9db..86ae73b89d4d 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -50,6 +50,7 @@ static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = { static const struct of_device_id ufs_mtk_of_match[] = { { .compatible = "mediatek,mt8183-ufshci" }, + { .compatible = "mediatek,mt8195-ufshci" }, {}, }; MODULE_DEVICE_TABLE(of, ufs_mtk_of_match); @@ -96,49 +97,59 @@ static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE); + return host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE; } static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL); + return host->caps & 
UFS_MTK_CAP_VA09_PWR_CTRL; } static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC); + return host->caps & UFS_MTK_CAP_BROKEN_VCC; } static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO); + return host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO; } static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return (host->caps & UFS_MTK_CAP_TX_SKEW_FIX); + return host->caps & UFS_MTK_CAP_TX_SKEW_FIX; } static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return (host->caps & UFS_MTK_CAP_RTFF_MTCMOS); + return host->caps & UFS_MTK_CAP_RTFF_MTCMOS; } static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return (host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM); + return host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM; +} + +static bool ufs_mtk_is_clk_scale_ready(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct ufs_mtk_clk *mclk = &host->mclk; + + return mclk->ufs_sel_clki && + mclk->ufs_sel_max_clki && + mclk->ufs_sel_min_clki; } static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) @@ -267,6 +278,13 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba, ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80, REG_UFS_XOUFS_CTRL); + + /* DDR_EN setting */ + if (host->ip_ver >= IP_VER_MT6989) { + ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8), + 0x453000, REG_UFS_MMIO_OPT_CTRL_0); + } + } return 0; @@ -344,7 +362,16 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value); - ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res); + /* + * If the clock-on request times out, assume the clock is still off and + * notify tfa to apply the clock-off settings (keep DIFN disabled, + * release the resource). + * If the clock-off request times out, assume the clock will turn off + * eventually and clear ref_clk_enabled directly (keep DIFN disabled, + * keep the resource). + */ + if (on) + ufs_mtk_ref_clk_notify(false, POST_CHANGE, res); + else + host->ref_clk_enabled = false; return -ETIMEDOUT; @@ -663,6 +690,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba) if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos")) host->caps |= UFS_MTK_CAP_RTFF_MTCMOS; + if (of_property_read_bool(np, "mediatek,ufs-broken-rtc")) + host->caps |= UFS_MTK_CAP_MCQ_BROKEN_RTC; + dev_info(hba->dev, "caps: 0x%x", host->caps); } @@ -779,6 +809,91 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, return ret; } +static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct blk_mq_tag_set *tag_set = &hba->host->tag_set; + struct blk_mq_queue_map *map = &tag_set->map[HCTX_TYPE_DEFAULT]; + unsigned int nr = map->nr_queues; + unsigned int q_index; + + q_index = map->mq_map[cpu]; + if (q_index > nr) { + dev_err(hba->dev, "hwq index %d exceed %d\n", + q_index, nr); + return MTK_MCQ_INVALID_IRQ; + } + + return host->mcq_intr_info[q_index].irq; +} + +static void ufs_mtk_mcq_set_irq_affinity(struct ufs_hba *hba, unsigned int cpu) +{ + unsigned int irq, _cpu; + int ret; + + irq = ufs_mtk_mcq_get_irq(hba, cpu); + if (irq == MTK_MCQ_INVALID_IRQ) { + dev_err(hba->dev, "invalid irq.
unable to bind irq to cpu%d", cpu); + return; + } + + /* force migrate irq of cpu0 to cpu3 */ + _cpu = (cpu == 0) ? 3 : cpu; + ret = irq_set_affinity(irq, cpumask_of(_cpu)); + if (ret) { + dev_err(hba->dev, "set irq %d affinity to CPU %d failed\n", + irq, _cpu); + return; + } + dev_info(hba->dev, "set irq %d affinity to CPU: %d\n", irq, _cpu); +} + +static bool ufs_mtk_is_legacy_chipset(struct ufs_hba *hba, u32 hw_ip_ver) +{ + bool is_legacy = false; + + switch (hw_ip_ver) { + case IP_LEGACY_VER_MT6893: + case IP_LEGACY_VER_MT6781: + /* can add other legacy chipset ID here accordingly */ + is_legacy = true; + break; + default: + break; + } + dev_info(hba->dev, "legacy IP version - 0x%x, is legacy : %d", hw_ip_ver, is_legacy); + + return is_legacy; +} + +/* + * HW version format has been changed from 01MMmmmm to 1MMMmmmm, since + * project MT6878. In order to perform correct version comparison, + * version number is changed by SW for the following projects. + * IP_VER_MT6983 0x00360000 to 0x10360000 + * IP_VER_MT6897 0x01440000 to 0x10440000 + * IP_VER_MT6989 0x01450000 to 0x10450000 + * IP_VER_MT6991 0x01460000 to 0x10460000 + */ +static void ufs_mtk_get_hw_ip_version(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + u32 hw_ip_ver; + + hw_ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); + + if (((hw_ip_ver & (0xFF << 24)) == (0x1 << 24)) || + ((hw_ip_ver & (0xFF << 24)) == 0)) { + hw_ip_ver &= ~(0xFF << 24); + hw_ip_ver |= (0x1 << 28); + } + + host->ip_ver = hw_ip_ver; + + host->legacy_ip_ver = ufs_mtk_is_legacy_chipset(hba, hw_ip_ver); +} + static void ufs_mtk_get_controller_version(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); @@ -818,8 +933,10 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); struct list_head *head = &hba->clk_list_head; - struct ufs_mtk_clk *mclk = &host->mclk; struct ufs_clk_info *clki, *clki_tmp; + struct device *dev = hba->dev; + struct regulator *reg; + u32 volt; /* * Find private clocks and store them in struct ufs_mtk_clk. @@ -837,15 +954,57 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba) host->mclk.ufs_sel_min_clki = clki; clk_disable_unprepare(clki->clk); list_del(&clki->list); + } else if (!strcmp(clki->name, "ufs_fde")) { + host->mclk.ufs_fde_clki = clki; + } else if (!strcmp(clki->name, "ufs_fde_max_src")) { + host->mclk.ufs_fde_max_clki = clki; + clk_disable_unprepare(clki->clk); + list_del(&clki->list); + } else if (!strcmp(clki->name, "ufs_fde_min_src")) { + host->mclk.ufs_fde_min_clki = clki; + clk_disable_unprepare(clki->clk); + list_del(&clki->list); } } - if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki || - !mclk->ufs_sel_min_clki) { + list_for_each_entry(clki, head, list) { + dev_info(hba->dev, "clk \"%s\" present", clki->name); + } + + if (!ufs_mtk_is_clk_scale_ready(hba)) { hba->caps &= ~UFSHCD_CAP_CLK_SCALING; dev_info(hba->dev, "%s: Clk-scaling not ready. Feature disabled.", __func__); + return; + } + + /* + * Default get vcore if dts have these settings. + * No matter clock scaling support or not. 
(may disable by customer) + */ + reg = devm_regulator_get_optional(dev, "dvfsrc-vcore"); + if (IS_ERR(reg)) { + dev_info(dev, "failed to get dvfsrc-vcore: %ld", + PTR_ERR(reg)); + return; + } + + if (of_property_read_u32(dev->of_node, "clk-scale-up-vcore-min", + &volt)) { + dev_info(dev, "failed to get clk-scale-up-vcore-min"); + return; + } + + host->mclk.reg_vcore = reg; + host->mclk.vcore_volt = volt; + + /* If default boot is max gear, request vcore */ + if (reg && volt && host->clk_scale_up) { + if (regulator_set_voltage(reg, volt, INT_MAX)) { + dev_info(hba->dev, + "Failed to set vcore to %d\n", volt); + } } } @@ -1014,13 +1173,17 @@ static int ufs_mtk_init(struct ufs_hba *hba) /* Enable clk scaling*/ hba->caps |= UFSHCD_CAP_CLK_SCALING; + host->clk_scale_up = true; /* default is max freq */ /* Set runtime pm delay to replace default */ shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS; hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL; + hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR; - hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC; + if (host->caps & UFS_MTK_CAP_MCQ_BROKEN_RTC) + hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC; + hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80); if (host->caps & UFS_MTK_CAP_DISABLE_AH8) @@ -1050,7 +1213,7 @@ static int ufs_mtk_init(struct ufs_hba *hba) ufs_mtk_setup_clocks(hba, true, POST_CHANGE); - host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); + ufs_mtk_get_hw_ip_version(hba); goto out; @@ -1505,6 +1668,13 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) { struct ufs_dev_info *dev_info = &hba->dev_info; u16 mid = dev_info->wmanufacturerid; + unsigned int cpu; + + if (hba->mcq_enabled) { + /* Iterate all cpus to set affinity for mcq irqs */ + for (cpu = 0; cpu < nr_cpu_ids; cpu++) + ufs_mtk_mcq_set_irq_affinity(hba, cpu); + } if (mid == UFS_VENDOR_SAMSUNG) { ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); @@ -1598,24 +1768,30 @@ static void ufs_mtk_config_scaling_param(struct ufs_hba *hba, hba->vps->ondemand_data.downdifferential = 20; } -/** - * ufs_mtk_clk_scale - Internal clk scaling operation - * - * MTK platform supports clk scaling by switching parent of ufs_sel(mux). - * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware. - * Max and min clocks rate of ufs_sel defined in dts should match rate of - * "ufs_sel_max_src" and "ufs_sel_min_src" respectively. - * This prevent changing rate of pll clock that is shared between modules. 
- * - * @hba: per adapter instance - * @scale_up: True for scaling up and false for scaling down - */ -static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) +static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); struct ufs_mtk_clk *mclk = &host->mclk; struct ufs_clk_info *clki = mclk->ufs_sel_clki; - int ret = 0; + struct ufs_clk_info *fde_clki = mclk->ufs_fde_clki; + struct regulator *reg; + int volt, ret = 0; + bool clk_bind_vcore = false; + bool clk_fde_scale = false; + + if (!hba->clk_scaling.is_initialized) + return; + + if (!clki || !fde_clki) + return; + + reg = host->mclk.reg_vcore; + volt = host->mclk.vcore_volt; + if (reg && volt != 0) + clk_bind_vcore = true; + + if (mclk->ufs_fde_max_clki && mclk->ufs_fde_min_clki) + clk_fde_scale = true; ret = clk_prepare_enable(clki->clk); if (ret) { @@ -1624,21 +1800,109 @@ static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) return; } + if (clk_fde_scale) { + ret = clk_prepare_enable(fde_clki->clk); + if (ret) { + dev_info(hba->dev, + "fde clk_prepare_enable() fail, ret: %d\n", ret); + return; + } + } + if (scale_up) { + if (clk_bind_vcore) { + ret = regulator_set_voltage(reg, volt, INT_MAX); + if (ret) { + dev_info(hba->dev, + "Failed to set vcore to %d\n", volt); + goto out; + } + } + ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk); - clki->curr_freq = clki->max_freq; + if (ret) { + dev_info(hba->dev, "Failed to set clk mux, ret = %d\n", + ret); + } + + if (clk_fde_scale) { + ret = clk_set_parent(fde_clki->clk, + mclk->ufs_fde_max_clki->clk); + if (ret) { + dev_info(hba->dev, + "Failed to set fde clk mux, ret = %d\n", + ret); + } + } } else { + if (clk_fde_scale) { + ret = clk_set_parent(fde_clki->clk, + mclk->ufs_fde_min_clki->clk); + if (ret) { + dev_info(hba->dev, + "Failed to set fde clk mux, ret = %d\n", + ret); + goto out; + } + } + ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk); - clki->curr_freq = clki->min_freq; - } + if (ret) { + dev_info(hba->dev, "Failed to set clk mux, ret = %d\n", + ret); + goto out; + } - if (ret) { - dev_info(hba->dev, - "Failed to set ufs_sel_clki, ret: %d\n", ret); + if (clk_bind_vcore) { + ret = regulator_set_voltage(reg, 0, INT_MAX); + if (ret) { + dev_info(hba->dev, + "failed to set vcore to MIN\n"); + } + } } +out: clk_disable_unprepare(clki->clk); + if (clk_fde_scale) + clk_disable_unprepare(fde_clki->clk); +} + +/** + * ufs_mtk_clk_scale - Internal clk scaling operation + * + * MTK platform supports clk scaling by switching parent of ufs_sel(mux). + * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware. + * Max and min clocks rate of ufs_sel defined in dts should match rate of + * "ufs_sel_max_src" and "ufs_sel_min_src" respectively. + * This prevent changing rate of pll clock that is shared between modules. 
+ * + * @hba: per adapter instance + * @scale_up: True for scaling up and false for scaling down + */ +static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct ufs_mtk_clk *mclk = &host->mclk; + struct ufs_clk_info *clki = mclk->ufs_sel_clki; + + if (host->clk_scale_up == scale_up) + goto out; + + if (scale_up) + _ufs_mtk_clk_scale(hba, true); + else + _ufs_mtk_clk_scale(hba, false); + + host->clk_scale_up = scale_up; + + /* Must always set before clk_set_rate() */ + if (scale_up) + clki->curr_freq = clki->max_freq; + else + clki->curr_freq = clki->min_freq; +out: trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk)); } diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h index 05d76a6bd772..e46dc5fa209d 100644 --- a/drivers/ufs/host/ufs-mediatek.h +++ b/drivers/ufs/host/ufs-mediatek.h @@ -133,6 +133,8 @@ enum ufs_mtk_host_caps { UFS_MTK_CAP_DISABLE_MCQ = 1 << 8, /* Control MTCMOS with RTFF */ UFS_MTK_CAP_RTFF_MTCMOS = 1 << 9, + + UFS_MTK_CAP_MCQ_BROKEN_RTC = 1 << 10, }; struct ufs_mtk_crypt_cfg { @@ -147,6 +149,11 @@ struct ufs_mtk_clk { struct ufs_clk_info *ufs_sel_clki; /* Mux */ struct ufs_clk_info *ufs_sel_max_clki; /* Max src */ struct ufs_clk_info *ufs_sel_min_clki; /* Min src */ + struct ufs_clk_info *ufs_fde_clki; /* Mux */ + struct ufs_clk_info *ufs_fde_max_clki; /* Max src */ + struct ufs_clk_info *ufs_fde_min_clki; /* Min src */ + struct regulator *reg_vcore; + int vcore_volt; }; struct ufs_mtk_hw_ver { @@ -176,9 +183,11 @@ struct ufs_mtk_host { bool mphy_powered_on; bool unipro_lpm; bool ref_clk_enabled; + bool clk_scale_up; u16 ref_clk_ungating_wait_us; u16 ref_clk_gating_wait_us; u32 ip_ver; + bool legacy_ip_ver; bool mcq_set_intr; bool is_mcq_intr_enabled; @@ -192,4 +201,27 @@ struct ufs_mtk_host { /* MTK RTT support number */ #define MTK_MAX_NUM_RTT 2 +/* UFSHCI MTK ip version value */ +enum { + /* UFSHCI 3.1 */ + IP_VER_MT6983 = 0x10360000, + IP_VER_MT6878 = 0x10420200, + + /* UFSHCI 4.0 */ + IP_VER_MT6897 = 0x10440000, + IP_VER_MT6989 = 0x10450000, + IP_VER_MT6899 = 0x10450100, + IP_VER_MT6991_A0 = 0x10460000, + IP_VER_MT6991_B0 = 0x10470000, + IP_VER_MT6993 = 0x10480000, + + IP_VER_NONE = 0xFFFFFFFF +}; + +enum ip_ver_legacy { + IP_LEGACY_VER_MT6781 = 0x10380000, + IP_LEGACY_VER_MT6879 = 0x10360000, + IP_LEGACY_VER_MT6893 = 0x20160706 +}; + #endif /* !_UFS_MEDIATEK_H */ diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 18a978452001..d15f1a13b3b5 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -552,11 +552,32 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) */ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) { + int err; + + /* Enable UTP internal clock gating */ ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2); /* Ensure that HW clock gating is enabled before next operations */ ufshcd_readl(hba, REG_UFS_CFG2); + + /* Enable Unipro internal clock gating */ + err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK, + DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG); + if (err) + goto out; + + err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK, + PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG); + if (err) + goto out; + + err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN, + DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN, + DME_VS_CORE_CLK_CTRL); +out: + if (err) + dev_err(hba->dev, "hw clk gating enabled failed\n"); } static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, 
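
The Unipro clock-gating writes in the hunk above rely on the ufshcd_dme_rmw() helper added earlier in this series, which performs a masked read-modify-write of a DME attribute. The standalone sketch below is plain userspace C, not kernel code; dme_read()/dme_write() and the simulated attribute variable are hypothetical stand-ins for ufshcd_dme_get()/ufshcd_dme_set(). It only illustrates the masking arithmetic: the bits selected by the mask are replaced, and every other bit of the attribute is preserved.

#include <stdint.h>
#include <stdio.h>

/* Simulated DME attribute value; stand-in for what ufshcd_dme_get() would read. */
static uint32_t fake_attr = 0x80000005;

static uint32_t dme_read(void) { return fake_attr; }
static void dme_write(uint32_t val) { fake_attr = val; }

/* Masked read-modify-write: update only the bits selected by @mask. */
static void dme_rmw(uint32_t mask, uint32_t val)
{
	uint32_t cfg = dme_read();

	cfg &= ~mask;		/* clear the field selected by the mask */
	cfg |= (val & mask);	/* insert the new field value */
	dme_write(cfg);
}

int main(void)
{
	/*
	 * Set bits [9:0] (the shape of DL_VS_CLK_CFG_MASK above); bit 31 of
	 * the original value is outside the mask and stays untouched.
	 */
	dme_rmw(0x3ff, 0x3ff);
	printf("attr = 0x%08x\n", fake_attr);	/* prints attr = 0x800003ff */
	return 0;
}

In the qcom hunk above, this pattern is applied with DL_VS_CLK_CFG_MASK, PA_VS_CLK_CFG_REG_MASK and DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN, so only the clock-gating control fields of each vendor attribute are modified while the remaining bits are left as previously programmed.
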
@@ -1873,7 +1894,6 @@ static int ufs_qcom_device_reset(struct ufs_hba *hba) return 0; } -#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, struct devfreq_dev_profile *p, struct devfreq_simple_ondemand_data *d) @@ -1885,13 +1905,6 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, hba->clk_scaling.suspend_on_no_request = true; } -#else -static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, - struct devfreq_dev_profile *p, - struct devfreq_simple_ondemand_data *data) -{ -} -#endif /* Resources */ static const struct ufshcd_res_info ufs_res_info[RES_MAX] = { @@ -2109,8 +2122,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) retain_and_null_ptr(qi); - if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 && - host->hw_ver.step == 0) { + if (host->hw_ver.major >= 6) { ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1), REG_UFS_CFG3); } diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h index 0a5cfc2dd4f7..e0e129af7c16 100644 --- a/drivers/ufs/host/ufs-qcom.h +++ b/drivers/ufs/host/ufs-qcom.h @@ -24,6 +24,15 @@ #define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B +/* bit and mask definitions for PA_VS_CLK_CFG_REG attribute */ +#define PA_VS_CLK_CFG_REG 0x9004 +#define PA_VS_CLK_CFG_REG_MASK GENMASK(8, 0) + +/* bit and mask definitions for DL_VS_CLK_CFG attribute */ +#define DL_VS_CLK_CFG 0xA00B +#define DL_VS_CLK_CFG_MASK GENMASK(9, 0) +#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN BIT(9) + /* QCOM UFS host controller vendor specific registers */ enum { REG_UFS_SYS1CLK_1US = 0xC0, diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c index 996387906aa1..b39239f641f2 100644 --- a/drivers/ufs/host/ufshcd-pci.c +++ b/drivers/ufs/host/ufshcd-pci.c @@ -22,17 +22,12 @@ #define MAX_SUPP_MAC 64 -struct ufs_host { - void (*late_init)(struct ufs_hba *hba); -}; - enum intel_ufs_dsm_func_id { INTEL_DSM_FNS = 0, INTEL_DSM_RESET = 1, }; struct intel_host { - struct ufs_host ufs_host; u32 dsm_fns; u32 active_ltr; u32 idle_ltr; @@ -408,8 +403,14 @@ static int ufs_intel_ehl_init(struct ufs_hba *hba) return ufs_intel_common_init(hba); } -static void ufs_intel_lkf_late_init(struct ufs_hba *hba) +static int ufs_intel_lkf_init(struct ufs_hba *hba) { + int err; + + hba->nop_out_timeout = 200; + hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; + hba->caps |= UFSHCD_CAP_CRYPTO; + err = ufs_intel_common_init(hba); /* LKF always needs a full reset, so set PM accordingly */ if (hba->caps & UFSHCD_CAP_DEEPSLEEP) { hba->spm_lvl = UFS_PM_LVL_6; @@ -418,19 +419,6 @@ static void ufs_intel_lkf_late_init(struct ufs_hba *hba) hba->spm_lvl = UFS_PM_LVL_5; hba->rpm_lvl = UFS_PM_LVL_5; } -} - -static int ufs_intel_lkf_init(struct ufs_hba *hba) -{ - struct ufs_host *ufs_host; - int err; - - hba->nop_out_timeout = 200; - hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; - hba->caps |= UFSHCD_CAP_CRYPTO; - err = ufs_intel_common_init(hba); - ufs_host = ufshcd_get_variant(hba); - ufs_host->late_init = ufs_intel_lkf_late_init; return err; } @@ -444,6 +432,8 @@ static int ufs_intel_adl_init(struct ufs_hba *hba) static int ufs_intel_mtl_init(struct ufs_hba *hba) { + hba->rpm_lvl = UFS_PM_LVL_2; + hba->spm_lvl = UFS_PM_LVL_2; hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN; return ufs_intel_common_init(hba); } @@ -574,7 +564,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev) static int ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - struct ufs_host *ufs_host; 
struct ufs_hba *hba; void __iomem *mmio_base; int err; @@ -607,10 +596,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - ufs_host = ufshcd_get_variant(hba); - if (ufs_host && ufs_host->late_init) - ufs_host->late_init(hba); - pm_runtime_put_noidle(&pdev->dev); pm_runtime_allow(&pdev->dev); diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h index 92e27e7bf088..a161c0222931 100644 --- a/include/scsi/sas_ata.h +++ b/include/scsi/sas_ata.h @@ -15,89 +15,37 @@ #ifdef CONFIG_SCSI_SAS_ATA -static inline int dev_is_sata(struct domain_device *dev) +static inline bool dev_is_sata(struct domain_device *dev) { - return dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM || - dev->dev_type == SAS_SATA_PM_PORT || dev->dev_type == SAS_SATA_PENDING; + switch (dev->dev_type) { + case SAS_SATA_DEV: + case SAS_SATA_PENDING: + case SAS_SATA_PM: + case SAS_SATA_PM_PORT: + return true; + default: + return false; + } } -int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy); -int sas_ata_init(struct domain_device *dev); -void sas_ata_task_abort(struct sas_task *task); -void sas_ata_strategy_handler(struct Scsi_Host *shost); -void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q); void sas_ata_schedule_reset(struct domain_device *dev); -void sas_ata_wait_eh(struct domain_device *dev); -void sas_probe_sata(struct asd_sas_port *port); -void sas_suspend_sata(struct asd_sas_port *port); -void sas_resume_sata(struct asd_sas_port *port); -void sas_ata_end_eh(struct ata_port *ap); void sas_ata_device_link_abort(struct domain_device *dev, bool force_reset); -int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, - int force_phy_id); +int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id); int smp_ata_check_ready_type(struct ata_link *link); -int sas_discover_sata(struct domain_device *dev); -int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy, - struct domain_device *child, int phy_id); extern const struct attribute_group sas_ata_sdev_attr_group; #else -static inline void sas_ata_disabled_notice(void) -{ - pr_notice_once("ATA device seen but CONFIG_SCSI_SAS_ATA=N\n"); -} - -static inline int dev_is_sata(struct domain_device *dev) -{ - return 0; -} -static inline int sas_ata_init(struct domain_device *dev) -{ - return 0; -} -static inline void sas_ata_task_abort(struct sas_task *task) -{ -} - -static inline void sas_ata_strategy_handler(struct Scsi_Host *shost) -{ -} - -static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q) +static inline bool dev_is_sata(struct domain_device *dev) { + return false; } static inline void sas_ata_schedule_reset(struct domain_device *dev) { } -static inline void sas_ata_wait_eh(struct domain_device *dev) -{ -} - -static inline void sas_probe_sata(struct asd_sas_port *port) -{ -} - -static inline void sas_suspend_sata(struct asd_sas_port *port) -{ -} - -static inline void sas_resume_sata(struct asd_sas_port *port) -{ -} - -static inline int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) -{ - return 0; -} - -static inline void sas_ata_end_eh(struct ata_port *ap) -{ -} - static inline void sas_ata_device_link_abort(struct domain_device *dev, bool force_reset) { @@ -114,19 +62,6 @@ static inline int smp_ata_check_ready_type(struct ata_link *link) return 0; } -static inline int sas_discover_sata(struct domain_device *dev) -{ - sas_ata_disabled_notice(); - return -ENXIO; -} - -static inline int sas_ata_add_dev(struct 
domain_device *parent, struct ex_phy *phy, - struct domain_device *child, int phy_id) -{ - sas_ata_disabled_notice(); - return -ENODEV; -} - #define sas_ata_sdev_attr_group ((struct attribute_group) {}) #endif diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 68dd49947d04..6d6500148c4b 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -184,6 +184,11 @@ struct scsi_device { */ unsigned force_runtime_start_on_system_start:1; + /* + * Set if the device is an ATA device. + */ + unsigned is_ata:1; + unsigned removable:1; unsigned changed:1; /* Data invalid due to media change */ unsigned busy:1; /* Used to prevent races */ diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index d02b55261307..b908aacfef48 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -383,6 +383,8 @@ struct fc_rport { /* aka fc_starget_attrs */ struct work_struct stgt_delete_work; struct work_struct rport_delete_work; struct request_queue *rqst_q; /* bsg support */ + + struct workqueue_struct *devloss_work_q; } __attribute__((aligned(sizeof(unsigned long)))); /* bit field values for struct fc_rport "flags" field: */ @@ -576,7 +578,6 @@ struct fc_host_attrs { /* work queues for rport state manipulation */ struct workqueue_struct *work_q; - struct workqueue_struct *devloss_work_q; /* bsg support */ struct request_queue *rqst_q; @@ -654,8 +655,6 @@ struct fc_host_attrs { (((struct fc_host_attrs *)(x)->shost_data)->npiv_vports_inuse) #define fc_host_work_q(x) \ (((struct fc_host_attrs *)(x)->shost_data)->work_q) -#define fc_host_devloss_work_q(x) \ - (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q) #define fc_host_dev_loss_tmo(x) \ (((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo) #define fc_host_max_ct_payload(x) \ diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h index bf6cc98d9122..c36c72ab7f2b 100644 --- a/include/trace/events/scsi.h +++ b/include/trace/events/scsi.h @@ -200,6 +200,14 @@ TRACE_EVENT(scsi_dispatch_cmd_start, __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len)) ); +#define scsi_rtn_name(result) { result, #result } +#define show_rtn_name(val) \ + __print_symbolic(val, \ + scsi_rtn_name(SCSI_MLQUEUE_HOST_BUSY), \ + scsi_rtn_name(SCSI_MLQUEUE_DEVICE_BUSY), \ + scsi_rtn_name(SCSI_MLQUEUE_EH_RETRY), \ + scsi_rtn_name(SCSI_MLQUEUE_TARGET_BUSY)) + TRACE_EVENT(scsi_dispatch_cmd_error, TP_PROTO(struct scsi_cmnd *cmd, int rtn), @@ -240,14 +248,15 @@ TRACE_EVENT(scsi_dispatch_cmd_error, TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \ " prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)" \ - " rtn=%d", + " rtn=%s", __entry->host_no, __entry->channel, __entry->id, __entry->lun, __entry->data_sglen, __entry->prot_sglen, show_prot_op_name(__entry->prot_op), __entry->driver_tag, __entry->scheduler_tag, show_opcode_name(__entry->opcode), __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len), __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len), - __entry->rtn) + show_rtn_name(__entry->rtn) + ) ); DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template, diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h index c0c59a8f7256..72fd385037a6 100644 --- a/include/ufs/ufs.h +++ b/include/ufs/ufs.h @@ -182,6 +182,11 @@ enum attr_idn { QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F, QUERY_ATTR_IDN_TIMESTAMP = 0x30, QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID = 0x34, + QUERY_ATTR_IDN_HID_DEFRAG_OPERATION = 0x35, + QUERY_ATTR_IDN_HID_AVAILABLE_SIZE = 
0x36, + QUERY_ATTR_IDN_HID_SIZE = 0x37, + QUERY_ATTR_IDN_HID_PROGRESS_RATIO = 0x38, + QUERY_ATTR_IDN_HID_STATE = 0x39, QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT = 0x3C, QUERY_ATTR_IDN_WB_BUF_RESIZE_EN = 0x3D, QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS = 0x3E, @@ -401,6 +406,7 @@ enum { UFS_DEV_HPB_SUPPORT = BIT(7), UFS_DEV_WRITE_BOOSTER_SUP = BIT(8), UFS_DEV_LVL_EXCEPTION_SUP = BIT(12), + UFS_DEV_HID_SUPPORT = BIT(13), }; #define UFS_DEV_HPB_SUPPORT_VERSION 0x310 @@ -466,6 +472,24 @@ enum ufs_ref_clk_freq { REF_CLK_FREQ_INVAL = -1, }; +/* bDefragOperation attribute values */ +enum ufs_hid_defrag_operation { + HID_ANALYSIS_AND_DEFRAG_DISABLE = 0, + HID_ANALYSIS_ENABLE = 1, + HID_ANALYSIS_AND_DEFRAG_ENABLE = 2, +}; + +/* bHIDState attribute values */ +enum ufs_hid_state { + HID_IDLE = 0, + ANALYSIS_IN_PROGRESS = 1, + DEFRAG_REQUIRED = 2, + DEFRAG_IN_PROGRESS = 3, + DEFRAG_COMPLETED = 4, + DEFRAG_NOT_REQUIRED = 5, + NUM_UFS_HID_STATES = 6, +}; + /* bWriteBoosterBufferResizeEn attribute */ enum wb_resize_en { WB_RESIZE_EN_IDLE = 0, @@ -625,6 +649,8 @@ struct ufs_dev_info { u32 rtc_update_period; u8 rtt_cap; /* bDeviceRTTCap */ + + bool hid_sup; }; /* diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 9b3515cee711..1d3943777584 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -1480,6 +1480,7 @@ void ufshcd_resume_complete(struct device *dev); bool ufshcd_is_hba_active(struct ufs_hba *hba); void ufshcd_pm_qos_init(struct ufs_hba *hba); void ufshcd_pm_qos_exit(struct ufs_hba *hba); +int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask, u32 val, u32 attr); /* Wrapper functions for safely calling variant operations */ static inline int ufshcd_vops_init(struct ufs_hba *hba)