diff --git a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml index 25a5edeea164..cde334e3206b 100644 --- a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml +++ b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml @@ -26,6 +26,7 @@ properties: - qcom,msm8994-ufshc - qcom,msm8996-ufshc - qcom,msm8998-ufshc + - qcom,qcs8300-ufshc - qcom,sa8775p-ufshc - qcom,sc7180-ufshc - qcom,sc7280-ufshc @@ -146,6 +147,7 @@ allOf: contains: enum: - qcom,msm8998-ufshc + - qcom,qcs8300-ufshc - qcom,sa8775p-ufshc - qcom,sc7280-ufshc - qcom,sc8180x-ufshc diff --git a/MAINTAINERS b/MAINTAINERS index c27f3190737f..6b5e9f0ffc92 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23816,7 +23816,9 @@ F: drivers/ufs/host/*dwc* UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER EXYNOS HOOKS M: Alim Akhtar +R: Peter Griffin L: linux-scsi@vger.kernel.org +L: linux-samsung-soc@vger.kernel.org S: Maintained F: drivers/ufs/host/ufs-exynos* diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h index a1ec7e84d6fe..40b34f670065 100644 --- a/drivers/message/fusion/mptlan.h +++ b/drivers/message/fusion/mptlan.h @@ -51,10 +51,7 @@ #define LINUX_MPTLAN_H_INCLUDED /*****************************************************************************/ -#if !defined(__GENKSYMS__) #include -#endif - #include #include // #include diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index a0bcb0864ecd..a798e26c6402 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -4231,10 +4231,8 @@ mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num, static void mptsas_reprobe_lun(struct scsi_device *sdev, void *data) { - int rc; - sdev->no_uld_attach = data ? 1 : 0; - rc = scsi_device_reprobe(sdev); + WARN_ON(scsi_device_reprobe(sdev)); } static void diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c index ad39797890e5..bf054dd7682b 100644 --- a/drivers/scsi/a3000.c +++ b/drivers/scsi/a3000.c @@ -302,9 +302,9 @@ static void __exit amiga_a3000_scsi_remove(struct platform_device *pdev) * triggering a section mismatch warning. */ static struct platform_driver amiga_a3000_scsi_driver __refdata = { - .remove_new = __exit_p(amiga_a3000_scsi_remove), - .driver = { - .name = "amiga-a3000-scsi", + .remove = __exit_p(amiga_a3000_scsi_remove), + .driver = { + .name = "amiga-a3000-scsi", }, }; diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c index d9103adc87fe..75b43047a155 100644 --- a/drivers/scsi/a4000t.c +++ b/drivers/scsi/a4000t.c @@ -115,9 +115,9 @@ static void __exit amiga_a4000t_scsi_remove(struct platform_device *pdev) * triggering a section mismatch warning. 
*/ static struct platform_driver amiga_a4000t_scsi_driver __refdata = { - .remove_new = __exit_p(amiga_a4000t_scsi_remove), - .driver = { - .name = "amiga-a4000t-scsi", + .remove = __exit_p(amiga_a4000t_scsi_remove), + .driver = { + .name = "amiga-a4000t-scsi", }, }; diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 1d09d3ac6aa4..8c384c25dca1 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2736,7 +2736,6 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index, int isAif, int isFastResponse, struct hw_fib *aif_fib); int aac_reset_adapter(struct aac_dev *dev, int forced, u8 reset_type); -int aac_check_health(struct aac_dev * dev); int aac_command_thread(void *data); int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size); diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 47287559c768..ffef61c4aa01 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1698,127 +1698,6 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) return retval; } -int aac_check_health(struct aac_dev * aac) -{ - int BlinkLED; - unsigned long time_now, flagv = 0; - struct list_head * entry; - - /* Extending the scope of fib_lock slightly to protect aac->in_reset */ - if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) - return 0; - - if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) { - spin_unlock_irqrestore(&aac->fib_lock, flagv); - return 0; /* OK */ - } - - aac->in_reset = 1; - - /* Fake up an AIF: - * aac_aifcmd.command = AifCmdEventNotify = 1 - * aac_aifcmd.seqnum = 0xFFFFFFFF - * aac_aifcmd.data[0] = AifEnExpEvent = 23 - * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3 - * aac.aifcmd.data[2] = AifHighPriority = 3 - * aac.aifcmd.data[3] = BlinkLED - */ - - time_now = jiffies/HZ; - entry = aac->fib_list.next; - - /* - * For each Context that is on the - * fibctxList, make a copy of the - * fib, and then set the event to wake up the - * thread that is waiting for it. - */ - while (entry != &aac->fib_list) { - /* - * Extract the fibctx - */ - struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next); - struct hw_fib * hw_fib; - struct fib * fib; - /* - * Check if the queue is getting - * backlogged - */ - if (fibctx->count > 20) { - /* - * It's *not* jiffies folks, - * but jiffies / HZ, so do not - * panic ... - */ - u32 time_last = fibctx->jiffies; - /* - * Has it been > 2 minutes - * since the last read off - * the queue? 
- */ - if ((time_now - time_last) > aif_timeout) { - entry = entry->next; - aac_close_fib_context(aac, fibctx); - continue; - } - } - /* - * Warning: no sleep allowed while - * holding spinlock - */ - hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC); - fib = kzalloc(sizeof(struct fib), GFP_ATOMIC); - if (fib && hw_fib) { - struct aac_aifcmd * aif; - - fib->hw_fib_va = hw_fib; - fib->dev = aac; - aac_fib_init(fib); - fib->type = FSAFS_NTC_FIB_CONTEXT; - fib->size = sizeof (struct fib); - fib->data = hw_fib->data; - aif = (struct aac_aifcmd *)hw_fib->data; - aif->command = cpu_to_le32(AifCmdEventNotify); - aif->seqnum = cpu_to_le32(0xFFFFFFFF); - ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent); - ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic); - ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority); - ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED); - - /* - * Put the FIB onto the - * fibctx's fibs - */ - list_add_tail(&fib->fiblink, &fibctx->fib_list); - fibctx->count++; - /* - * Set the event to wake up the - * thread that will waiting. - */ - complete(&fibctx->completion); - } else { - printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); - kfree(fib); - kfree(hw_fib); - } - entry = entry->next; - } - - spin_unlock_irqrestore(&aac->fib_lock, flagv); - - if (BlinkLED < 0) { - printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n", - aac->name, BlinkLED); - goto out; - } - - printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); - -out: - aac->in_reset = 0; - return BlinkLED; -} - static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target) { return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers; diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c index 176704b24e6a..f1ce02cd569e 100644 --- a/drivers/scsi/aic7xxx/aic7770.c +++ b/drivers/scsi/aic7xxx/aic7770.c @@ -99,21 +99,6 @@ struct aic7770_identity aic7770_ident_table[] = ahc_aic7770_EISA_setup } }; -const int ahc_num_aic7770_devs = ARRAY_SIZE(aic7770_ident_table); - -struct aic7770_identity * -aic7770_find_device(uint32_t id) -{ - struct aic7770_identity *entry; - int i; - - for (i = 0; i < ahc_num_aic7770_devs; i++) { - entry = &aic7770_ident_table[i]; - if (entry->full_id == (id & entry->id_mask)) - return (entry); - } - return (NULL); -} int aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io) diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h index 9bc755a0a2d3..20857c213c72 100644 --- a/drivers/scsi/aic7xxx/aic7xxx.h +++ b/drivers/scsi/aic7xxx/aic7xxx.h @@ -1119,7 +1119,6 @@ struct aic7770_identity { ahc_device_setup_t *setup; }; extern struct aic7770_identity aic7770_ident_table[]; -extern const int ahc_num_aic7770_devs; #define AHC_EISA_SLOT_OFFSET 0xc00 #define AHC_EISA_IOSIZE 0x100 @@ -1135,7 +1134,6 @@ int ahc_pci_test_register_access(struct ahc_softc *); void __maybe_unused ahc_pci_resume(struct ahc_softc *ahc); /*************************** EISA/VL Front End ********************************/ -struct aic7770_identity *aic7770_find_device(uint32_t); int aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *, u_int port); diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 98a1b966a0b0..85055677666c 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -885,7 +885,7 @@ static void __exit atari_scsi_remove(struct platform_device *pdev) * triggering a section mismatch warning. 
*/ static struct platform_driver atari_scsi_driver __refdata = { - .remove_new = __exit_p(atari_scsi_remove), + .remove = __exit_p(atari_scsi_remove), .driver = { .name = DRV_MODULE_NAME, }, diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h index 4cb9249e583c..a6b8c4ddea19 100644 --- a/drivers/scsi/bfa/bfa.h +++ b/drivers/scsi/bfa/bfa.h @@ -138,14 +138,6 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), } while (0) -/* - * PCI devices supported by the current BFA - */ -struct bfa_pciid_s { - u16 device_id; - u16 vendor_id; -}; - extern char bfa_version[]; struct bfa_iocfc_regs_s { @@ -408,9 +400,7 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, (((&(_bfa)->modules.dconf_mod)->min_cfg) \ ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status)) -void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids); void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg); -void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg); void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_s *bfa); diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 3438d0b8ba06..a99a101b95ef 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -1933,24 +1933,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) } } -/* - * Return the list of PCI vendor/device id lists supported by this - * BFA instance. - */ -void -bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) -{ - static struct bfa_pciid_s __pciids[] = { - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P}, - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P}, - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT}, - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, - }; - - *npciids = ARRAY_SIZE(__pciids); - *pciids = __pciids; -} - /* * Use this function query the default struct bfa_iocfc_cfg_s value (compiled * into BFA layer). The OS driver can then turn back and overwrite entries that @@ -1987,20 +1969,3 @@ bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) cfg->drvcfg.delay_comp = BFA_FALSE; } - -void -bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) -{ - bfa_cfg_get_default(cfg); - cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; - cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; - cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN; - cfg->fwcfg.num_uf_bufs = BFA_UF_MIN; - cfg->fwcfg.num_rports = BFA_RPORT_MIN; - cfg->fwcfg.num_fwtio_reqs = 0; - - cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; - cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; - cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; - cfg->drvcfg.min_cfg = BFA_TRUE; -} diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h index 5e36620425ac..760606062c66 100644 --- a/drivers/scsi/bfa/bfa_defs_fcs.h +++ b/drivers/scsi/bfa/bfa_defs_fcs.h @@ -124,28 +124,6 @@ enum bfa_lport_offline_reason { BFA_LPORT_OFFLINE_FAB_LOGOUT, }; -/* - * FCS lport info. 
- */ -struct bfa_lport_info_s { - u8 port_type; /* bfa_lport_type_t : physical or - * virtual */ - u8 port_state; /* one of bfa_lport_state values */ - u8 offline_reason; /* one of bfa_lport_offline_reason_t - * values */ - wwn_t port_wwn; - wwn_t node_wwn; - - /* - * following 4 feilds are valid for Physical Ports only - */ - u32 max_vports_supp; /* Max supported vports */ - u32 num_vports_inuse; /* Num of in use vports */ - u32 max_rports_supp; /* Max supported rports */ - u32 num_rports_inuse; /* Num of doscovered rports */ - -}; - /* * FCS port statistics */ diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 28ae4dc14dc9..2518e36d70d3 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -3412,15 +3412,6 @@ bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim) } } -/* - * Notification on completions from related ioim. - */ -void -bfa_tskim_iodone(struct bfa_tskim_s *tskim) -{ - bfa_wc_down(&tskim->wc); -} - /* * Handle IOC h/w failure notification from itnim. */ diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h index 4499f84c2d81..64e4485b226e 100644 --- a/drivers/scsi/bfa/bfa_fcpim.h +++ b/drivers/scsi/bfa/bfa_fcpim.h @@ -339,7 +339,6 @@ void bfa_ioim_tov(struct bfa_ioim_s *ioim); void bfa_tskim_attach(struct bfa_fcpim_s *fcpim); void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); -void bfa_tskim_iodone(struct bfa_tskim_s *tskim); void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim); void bfa_tskim_cleanup(struct bfa_tskim_s *tskim); void bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw); diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 9788354b90da..19903539a08d 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -373,15 +373,11 @@ bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port); struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs); void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port, struct bfa_rport_qualifier_s rport[], int *nrports); -wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, - int index, int nrports, bfa_boolean_t bwwn); struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn); void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname); -void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, - struct bfa_lport_info_s *port_info); void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port, struct bfa_lport_attr_s *port_attr); void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, @@ -416,8 +412,6 @@ struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_old_pid( struct bfa_fcs_lport_s *port, u32 pid); struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn( struct bfa_fcs_lport_s *port, wwn_t pwwn); -struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn( - struct bfa_fcs_lport_s *port, wwn_t nwwn); struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_qualifier( struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 pid); void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port, @@ -486,7 +480,6 @@ bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, u16 vf_id, struct bfa_lport_cfg_s *port_cfg, struct bfad_vport_s *vport_drv); -bfa_boolean_t bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport); bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport); bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport); bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s 
*vport); @@ -494,7 +487,6 @@ void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, struct bfa_vport_attr_s *vport_attr); struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn); -void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); @@ -623,8 +615,6 @@ void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, struct bfa_rport_attr_s *attr); struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn); -struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn( - struct bfa_fcs_lport_s *port, wwn_t rnwwn); void bfa_fcs_rport_set_del_timeout(u8 rport_tmo); void bfa_fcs_rport_set_max_logins(u32 max_logins); void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, @@ -633,8 +623,6 @@ void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 pid); -void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, - struct fc_logi_s *plogi_rsp); void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, struct fc_logi_s *plogi); diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 966bf6cc6dd9..9a85f417018f 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -937,25 +937,6 @@ bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn) return NULL; } -/* - * NWWN based Lookup for a R-Port in the Port R-Port Queue - */ -struct bfa_fcs_rport_s * -bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn) -{ - struct bfa_fcs_rport_s *rport; - struct list_head *qe; - - list_for_each(qe, &port->rport_q) { - rport = (struct bfa_fcs_rport_s *) qe; - if (wwn_is_equal(rport->nwwn, nwwn)) - return rport; - } - - bfa_trc(port->fcs, nwwn); - return NULL; -} - /* * PWWN & PID based Lookup for a R-Port in the Port R-Port Queue */ @@ -5645,54 +5626,6 @@ bfa_fcs_get_base_port(struct bfa_fcs_s *fcs) return &fcs->fabric.bport; } -wwn_t -bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index, - int nrports, bfa_boolean_t bwwn) -{ - struct list_head *qh, *qe; - struct bfa_fcs_rport_s *rport = NULL; - int i; - struct bfa_fcs_s *fcs; - - if (port == NULL || nrports == 0) - return (wwn_t) 0; - - fcs = port->fcs; - bfa_trc(fcs, (u32) nrports); - - i = 0; - qh = &port->rport_q; - qe = bfa_q_first(qh); - - while ((qe != qh) && (i < nrports)) { - rport = (struct bfa_fcs_rport_s *) qe; - if (bfa_ntoh3b(rport->pid) > 0xFFF000) { - qe = bfa_q_next(qe); - bfa_trc(fcs, (u32) rport->pwwn); - bfa_trc(fcs, rport->pid); - bfa_trc(fcs, i); - continue; - } - - if (bwwn) { - if (!memcmp(&wwn, &rport->pwwn, 8)) - break; - } else { - if (i == index) - break; - } - - i++; - qe = bfa_q_next(qe); - } - - bfa_trc(fcs, i); - if (rport) - return rport->pwwn; - else - return (wwn_t) 0; -} - void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port, struct bfa_rport_qualifier_s rports[], int *nrports) @@ -5823,54 +5756,6 @@ bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn) return NULL; } -/* - * API corresponding to NPIV_VPORT_GETINFO. 
- */ -void -bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, - struct bfa_lport_info_s *port_info) -{ - - bfa_trc(port->fcs, port->fabric->fabric_name); - - if (port->vport == NULL) { - /* - * This is a Physical port - */ - port_info->port_type = BFA_LPORT_TYPE_PHYSICAL; - - /* - * @todo : need to fix the state & reason - */ - port_info->port_state = 0; - port_info->offline_reason = 0; - - port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); - port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); - - port_info->max_vports_supp = - bfa_lps_get_max_vport(port->fcs->bfa); - port_info->num_vports_inuse = - port->fabric->num_vports; - port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; - port_info->num_rports_inuse = port->num_rports; - } else { - /* - * This is a virtual port - */ - port_info->port_type = BFA_LPORT_TYPE_VIRTUAL; - - /* - * @todo : need to fix the state & reason - */ - port_info->port_state = 0; - port_info->offline_reason = 0; - - port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); - port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); - } -} - void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, struct bfa_lport_stats_s *port_stats) @@ -6567,15 +6452,6 @@ bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); } -/* - * Cleanup notification from fabric SM on link timer expiry. - */ -void -bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport) -{ - vport->vport_stats.fab_cleanup++; -} - /* * Stop notification from fabric SM. To be invoked from within FCS. */ @@ -6698,24 +6574,6 @@ bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, return rc; } -/* - * Use this function to findout if this is a pbc vport or not. - * - * @param[in] vport - pointer to bfa_fcs_vport_t. - * - * @returns None - */ -bfa_boolean_t -bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport) -{ - - if (vport && (vport->lport.port_cfg.preboot_vp == BFA_TRUE)) - return BFA_TRUE; - else - return BFA_FALSE; - -} - /* * Use this function initialize the vport. * diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index ce52a9c88ae6..d4bde9bbe75b 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -2646,27 +2646,6 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn) bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); return rport; } -/* - * Called by bport in private loop topology to indicate that a - * rport has been discovered and plogi has been completed. - * - * @param[in] port - base port or vport - * @param[in] rpid - remote port ID - */ -void -bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs, - struct fc_logi_s *plogi) -{ - struct bfa_fcs_rport_s *rport; - - rport = bfa_fcs_rport_alloc(port, WWN_NULL, fchs->s_id); - if (!rport) - return; - - bfa_fcs_rport_update(rport, plogi); - - bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); -} /* * Called by bport/vport to handle PLOGI received from a new remote port. @@ -3089,21 +3068,6 @@ bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) return rport; } -struct bfa_fcs_rport_s * -bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn) -{ - struct bfa_fcs_rport_s *rport; - - rport = bfa_fcs_lport_get_rport_by_nwwn(port, rnwwn); - if (rport == NULL) { - /* - * TBD Error handling - */ - } - - return rport; -} - /* * Remote port features (RPF) implementation. 
*/ diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index ea2f107f564c..aa68d61a2d0d 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -2254,33 +2254,12 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) return status; } -/* - * Enable/disable IOC failure auto recovery. - */ -void -bfa_ioc_auto_recover(bfa_boolean_t auto_recover) -{ - bfa_auto_recover = auto_recover; -} - - - bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc) { return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); } -bfa_boolean_t -bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) -{ - u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc); - - return ((r32 != BFI_IOC_UNINIT) && - (r32 != BFI_IOC_INITING) && - (r32 != BFI_IOC_MEMTEST)); -} - bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) { diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 3ec10503caff..d70332e9ad6d 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -919,7 +919,6 @@ void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc); void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod); -void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); void bfa_ioc_detach(struct bfa_ioc_s *ioc); void bfa_ioc_suspend(struct bfa_ioc_s *ioc); void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, @@ -934,7 +933,6 @@ bfa_status_t bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc); -bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h index 578e7678b056..ed29ebda30da 100644 --- a/drivers/scsi/bfa/bfa_modules.h +++ b/drivers/scsi/bfa/bfa_modules.h @@ -113,7 +113,6 @@ void bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, struct bfa_s *); void bfa_sgpg_attach(struct bfa_s *, void *bfad, struct bfa_iocfc_cfg_s *, struct bfa_pcidev_s *); -void bfa_uf_iocdisable(struct bfa_s *); void bfa_uf_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, struct bfa_s *); void bfa_uf_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 9f33aa303b18..df33afaaa673 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -913,14 +913,6 @@ bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp) return reqbuf; } -u32 -bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp) -{ - struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; - - return mod->req_pld_sz; -} - /* * Get the internal response buffer pointer * @@ -1023,21 +1015,6 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, bfa_fcxp_queue(fcxp, send_req); } -/* - * Abort a BFA FCXP - * - * @param[in] fcxp BFA fcxp pointer - * - * @return void - */ -bfa_status_t -bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) -{ - bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag); - WARN_ON(1); - return BFA_STATUS_OK; -} - void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg, @@ -3857,15 +3834,6 @@ bfa_fcport_clr_hardalpa(struct bfa_s *bfa) return BFA_STATUS_OK; } -bfa_boolean_t 
-bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - *alpa = fcport->cfg.hardalpa; - return fcport->cfg.cfg_hardalpa; -} - u8 bfa_fcport_get_myalpa(struct bfa_s *bfa) { @@ -3923,17 +3891,6 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) /* * Get port attributes. */ - -wwn_t -bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - if (node) - return fcport->nwwn; - else - return fcport->pwwn; -} - void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) { @@ -4105,18 +4062,6 @@ bfa_fcport_is_ratelim(struct bfa_s *bfa) } -/* - * Enable/Disable FAA feature in port config - */ -void -bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, state); - fcport->cfg.faa_state = state; -} - /* * Get default minimum ratelim speed */ @@ -5528,23 +5473,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m) bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf); } -void -bfa_uf_iocdisable(struct bfa_s *bfa) -{ - struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); - struct bfa_uf_s *uf; - struct list_head *qe, *qen; - - /* Enqueue unused uf resources to free_q */ - list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q); - - list_for_each_safe(qe, qen, &ufm->uf_posted_q) { - uf = (struct bfa_uf_s *) qe; - list_del(&uf->qe); - bfa_uf_put(ufm, uf); - } -} - void bfa_uf_start(struct bfa_s *bfa) { diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index 26eeee82bedc..99a960a8a2db 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h @@ -587,14 +587,12 @@ bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa); enum bfa_port_topology bfa_fcport_get_cfg_topology(struct bfa_s *bfa); bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); -bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); u8 bfa_fcport_get_myalpa(struct bfa_s *bfa); bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa); bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize); u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa); u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa); void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr); -wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node); void bfa_fcport_event_register(struct bfa_s *bfa, void (*event_cbfn) (void *cbarg, enum bfa_port_linkstate event), void *event_cbarg); @@ -619,7 +617,6 @@ bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); void bfa_fcport_dportenable(struct bfa_s *bfa); void bfa_fcport_dportdisable(struct bfa_s *bfa); bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); -void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state); bfa_status_t bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn); bfa_status_t bfa_fcport_get_bbcr_attr(struct bfa_s *bfa, @@ -687,8 +684,6 @@ void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, bfa_cb_fcxp_send_t cbfn, void *cbarg, u32 rsp_maxlen, u8 rsp_timeout); -bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp); -u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp); u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa); void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw); diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 62cb7a864fd5..6aa1d3a7e24b 100644 --- a/drivers/scsi/bfa/bfad.c +++ 
b/drivers/scsi/bfa/bfad.c @@ -842,26 +842,6 @@ bfad_drv_init(struct bfad_s *bfad) return BFA_STATUS_OK; } -void -bfad_drv_uninit(struct bfad_s *bfad) -{ - unsigned long flags; - - spin_lock_irqsave(&bfad->bfad_lock, flags); - init_completion(&bfad->comp); - bfa_iocfc_stop(&bfad->bfa); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - wait_for_completion(&bfad->comp); - - del_timer_sync(&bfad->hal_tmo); - bfa_isr_disable(&bfad->bfa); - bfa_detach(&bfad->bfa); - bfad_remove_intr(bfad); - bfad_hal_mem_release(bfad); - - bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE; -} - void bfad_drv_start(struct bfad_s *bfad) { @@ -1693,9 +1673,8 @@ bfad_init(void) error = bfad_im_module_init(); if (error) { - error = -ENOMEM; printk(KERN_WARNING "bfad_im_module_init failure\n"); - goto ext; + return -ENOMEM; } if (strcmp(FCPI_NAME, " fcpim") == 0) diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index da42e3261237..ce2ec5108947 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -312,7 +312,6 @@ void bfad_bfa_tmo(struct timer_list *t); void bfad_init_timer(struct bfad_s *bfad); int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad); void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad); -void bfad_drv_uninit(struct bfad_s *bfad); int bfad_worker(void *ptr); void bfad_debugfs_init(struct bfad_port_s *port); void bfad_debugfs_exit(struct bfad_port_s *port); diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h index 41e6b4dac056..e1e0e967bcc3 100644 --- a/drivers/scsi/bfa/bfi.h +++ b/drivers/scsi/bfa/bfi.h @@ -1148,7 +1148,7 @@ struct bfi_diag_dport_scn_testcomp_s { u16 numbuffer; /* from switch */ u8 subtest_status[DPORT_TEST_MAX]; /* 4 bytes */ u32 latency; /* from switch */ - u32 distance; /* from swtich unit in meters */ + u32 distance; /* from switch unit in meters */ /* Buffers required to saturate the link */ u16 frm_sz; /* from switch for buf_reqd */ u8 rsvd[2]; diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c index f893e9779e9d..baf5f4e47937 100644 --- a/drivers/scsi/bvme6000_scsi.c +++ b/drivers/scsi/bvme6000_scsi.c @@ -106,7 +106,7 @@ static struct platform_driver bvme6000_scsi_driver = { .name = "bvme6000-scsi", }, .probe = bvme6000_probe, - .remove_new = bvme6000_device_remove, + .remove = bvme6000_device_remove, }; static int __init bvme6000_scsi_init(void) diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h index 8a133254c4f6..1e2d7c63a8e3 100644 --- a/drivers/scsi/esas2r/esas2r.h +++ b/drivers/scsi/esas2r/esas2r.h @@ -1045,10 +1045,6 @@ void esas2r_build_mgt_req(struct esas2r_adapter *a, u32 length, void *data); void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq); -void esas2r_build_cli_req(struct esas2r_adapter *a, - struct esas2r_request *rq, - u32 length, - u32 cmd_rsp_len); void esas2r_build_ioctl_req(struct esas2r_adapter *a, struct esas2r_request *rq, u32 length, diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c index 30028e56df63..5aa728704dfc 100644 --- a/drivers/scsi/esas2r/esas2r_vda.c +++ b/drivers/scsi/esas2r/esas2r_vda.c @@ -444,23 +444,6 @@ void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq) } } -/* Build a VDA CLI request. 
*/ -void esas2r_build_cli_req(struct esas2r_adapter *a, - struct esas2r_request *rq, - u32 length, - u32 cmd_rsp_len) -{ - struct atto_vda_cli_req *vrq = &rq->vrq->cli; - - clear_vda_request(rq); - - rq->vrq->scsi.function = VDA_FUNC_CLI; - - vrq->length = cpu_to_le32(length); - vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len); - vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge); -} - /* Build a VDA IOCTL request. */ void esas2r_build_ioctl_req(struct esas2r_adapter *a, struct esas2r_request *rq, diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index d223f482488f..a44768bceb9a 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -307,6 +307,7 @@ enum { struct hisi_sas_hw { int (*hw_init)(struct hisi_hba *hisi_hba); + int (*fw_info_check)(struct hisi_hba *hisi_hba); int (*interrupt_preinit)(struct hisi_hba *hisi_hba); void (*setup_itct)(struct hisi_hba *hisi_hba, struct hisi_sas_device *device); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 6219807ce3b9..53cb15f6714b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1321,6 +1321,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device) } if (rc == TMF_RESP_FUNC_COMPLETE) { + usleep_range(900, 1000); ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); @@ -1384,6 +1385,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) { + u32 new_state = hisi_hba->hw->get_phys_state(hisi_hba); struct asd_sas_port *_sas_port = NULL; int phy_no; @@ -1397,7 +1399,7 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) continue; /* Report PHY state change to libsas */ - if (state & BIT(phy_no)) { + if (new_state & BIT(phy_no)) { if (do_port_check && sas_port && sas_port->port_dev) { struct domain_device *dev = sas_port->port_dev; @@ -1410,6 +1412,16 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) } } else { hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); + + /* + * The new_state is not ready but old_state is ready, + * the two possible causes: + * 1. The connected device is removed + * 2. Device exists but phyup timed out + */ + if (state & BIT(phy_no)) + hisi_sas_notify_phy_event(phy, + HISI_PHYE_LINK_RESET); } } } @@ -1545,9 +1557,15 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) /* Init and wait for PHYs to come up and all libsas event finished. 
*/ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; - if (!(hisi_hba->phy_state & BIT(phy_no))) + if (!sas_phy->phy->enabled) + continue; + + if (!(hisi_hba->phy_state & BIT(phy_no))) { + hisi_sas_phy_enable(hisi_hba, phy_no, 1); continue; + } async_schedule_domain(hisi_sas_async_init_wait_phyup, phy, &async); @@ -2450,6 +2468,11 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; + if (hisi_hba->hw->fw_info_check) { + if (hisi_hba->hw->fw_info_check(hisi_hba)) + goto err_out; + } + error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (error) { dev_err(dev, "No usable DMA addressing method\n"); @@ -2630,10 +2653,10 @@ static __init int hisi_sas_init(void) static __exit void hisi_sas_exit(void) { - sas_release_transport(hisi_sas_stt); - if (hisi_sas_debugfs_enable) debugfs_remove(hisi_sas_debugfs_dir); + + sas_release_transport(hisi_sas_stt); } module_init(hisi_sas_init); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 71b5008c3552..c3e571be2222 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -1734,6 +1734,23 @@ static struct attribute *host_v1_hw_attrs[] = { ATTRIBUTE_GROUPS(host_v1_hw); +static int check_fw_info_v1_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + if (hisi_hba->n_phy < 0 || hisi_hba->n_phy > 9) { + dev_err(dev, "invalid phy number from FW\n"); + return -EINVAL; + } + + if (hisi_hba->queue_count < 0 || hisi_hba->queue_count > 32) { + dev_err(dev, "invalid queue count from FW\n"); + return -EINVAL; + } + + return 0; +} + static const struct scsi_host_template sht_v1_hw = { LIBSAS_SHT_BASE_NO_SLAVE_INIT .device_configure = hisi_sas_device_configure, @@ -1747,6 +1764,7 @@ static const struct scsi_host_template sht_v1_hw = { static const struct hisi_sas_hw hisi_sas_v1_hw = { .hw_init = hisi_sas_v1_init, + .fw_info_check = check_fw_info_v1_hw, .setup_itct = setup_itct_v1_hw, .sl_notify_ssp = sl_notify_ssp_v1_hw, .clear_itct = clear_itct_v1_hw, @@ -1784,7 +1802,7 @@ MODULE_DEVICE_TABLE(acpi, sas_v1_acpi_match); static struct platform_driver hisi_sas_v1_driver = { .probe = hisi_sas_v1_probe, - .remove_new = hisi_sas_remove, + .remove = hisi_sas_remove, .driver = { .name = DRV_NAME, .of_match_table = sas_v1_of_match, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 342d75f12051..1a62b5d15eca 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -3566,6 +3566,23 @@ static void map_queues_v2_hw(struct Scsi_Host *shost) } } +static int check_fw_info_v2_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + if (hisi_hba->n_phy < 0 || hisi_hba->n_phy > 9) { + dev_err(dev, "invalid phy number from FW\n"); + return -EINVAL; + } + + if (hisi_hba->queue_count < 0 || hisi_hba->queue_count > 16) { + dev_err(dev, "invalid queue count from FW\n"); + return -EINVAL; + } + + return 0; +} + static const struct scsi_host_template sht_v2_hw = { LIBSAS_SHT_BASE_NO_SLAVE_INIT .device_configure = hisi_sas_device_configure, @@ -3582,6 +3599,7 @@ static const struct scsi_host_template sht_v2_hw = { static const struct hisi_sas_hw hisi_sas_v2_hw = { .hw_init = hisi_sas_v2_init, + .fw_info_check = check_fw_info_v2_hw, .interrupt_preinit = hisi_sas_v2_interrupt_preinit, 
.setup_itct = setup_itct_v2_hw, .slot_index_alloc = slot_index_alloc_quirk_v2_hw, @@ -3631,7 +3649,7 @@ MODULE_DEVICE_TABLE(acpi, sas_v2_acpi_match); static struct platform_driver hisi_sas_v2_driver = { .probe = hisi_sas_v2_probe, - .remove_new = hisi_sas_remove, + .remove = hisi_sas_remove, .driver = { .name = DRV_NAME, .of_match_table = sas_v2_of_match, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 4cd3a3eab6f1..5db931663ae4 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -43,6 +43,7 @@ #define CQ_INT_CONVERGE_EN 0xb0 #define CFG_AGING_TIME 0xbc #define HGC_DFX_CFG2 0xc0 +#define CFG_ICT_TIMER_STEP_TRSH 0xc8 #define CFG_ABT_SET_QUERY_IPTT 0xd4 #define CFG_SET_ABORTED_IPTT_OFF 0 #define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF) @@ -638,9 +639,12 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, TRANS_LOCK_ICT_TIME, 0x4A817C80); hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); - hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + hisi_sas_write32(hisi_hba, CFG_ICT_TIMER_STEP_TRSH, 0xf4240); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); + /* configure the interrupt coalescing timeout period 10us */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0xa); + /* configure the count of CQ entries 10 */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0xa); hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN, hisi_sas_intr_conv); hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff); @@ -682,7 +686,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); - hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); + hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7ffffff); hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01); hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME, 0x30f4240); @@ -2493,6 +2497,7 @@ static int complete_v3_hw(struct hisi_sas_cq *cq) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + cond_resched(); return completed; } @@ -2796,14 +2801,15 @@ static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba) { /* config those registers between enable and disable PHYs */ hisi_sas_stop_phys(hisi_hba); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); if (hisi_hba->intr_coal_ticks == 0 || hisi_hba->intr_coal_count == 0) { - hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + /* configure the interrupt coalescing timeout period 10us */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0xa); + /* configure the count of CQ entries 10 */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0xa); } else { - hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, hisi_hba->intr_coal_ticks); hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, @@ -3371,6 +3377,23 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .debugfs_snapshot_regs = debugfs_snapshot_regs_v3_hw, }; +static int check_fw_info_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + if (hisi_hba->n_phy < 0 || 
hisi_hba->n_phy > 8) { + dev_err(dev, "invalid phy number from FW\n"); + return -EINVAL; + } + + if (hisi_hba->queue_count < 0 || hisi_hba->queue_count > 16) { + dev_err(dev, "invalid queue count from FW\n"); + return -EINVAL; + } + + return 0; +} + static struct Scsi_Host * hisi_sas_shost_alloc_pci(struct pci_dev *pdev) { @@ -3401,6 +3424,9 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev) if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; + if (check_fw_info_v3_hw(hisi_hba) < 0) + goto err_out; + if (experimental_iopoll_q_cnt < 0 || experimental_iopoll_q_cnt >= hisi_hba->queue_count) dev_err(dev, "iopoll queue count %d cannot exceed or equal 16, using default 0\n", @@ -3550,6 +3576,11 @@ debugfs_to_reg_name_v3_hw(int off, int base_off, return NULL; } +static bool debugfs_dump_is_generated_v3_hw(void *p) +{ + return p ? true : false; +} + static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s, const struct hisi_sas_debugfs_reg *reg) { @@ -3575,6 +3606,9 @@ static int debugfs_global_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *global = s->private; + if (!debugfs_dump_is_generated_v3_hw(global->data)) + return -EPERM; + debugfs_print_reg_v3_hw(global->data, s, &debugfs_global_reg); @@ -3586,6 +3620,9 @@ static int debugfs_axi_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *axi = s->private; + if (!debugfs_dump_is_generated_v3_hw(axi->data)) + return -EPERM; + debugfs_print_reg_v3_hw(axi->data, s, &debugfs_axi_reg); @@ -3597,6 +3634,9 @@ static int debugfs_ras_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *ras = s->private; + if (!debugfs_dump_is_generated_v3_hw(ras->data)) + return -EPERM; + debugfs_print_reg_v3_hw(ras->data, s, &debugfs_ras_reg); @@ -3609,6 +3649,9 @@ static int debugfs_port_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_port *port = s->private; const struct hisi_sas_debugfs_reg *reg_port = &debugfs_port_reg; + if (!debugfs_dump_is_generated_v3_hw(port->data)) + return -EPERM; + debugfs_print_reg_v3_hw(port->data, s, reg_port); return 0; @@ -3664,6 +3707,9 @@ static int debugfs_cq_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_cq *debugfs_cq = s->private; int slot; + if (!debugfs_dump_is_generated_v3_hw(debugfs_cq->complete_hdr)) + return -EPERM; + for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) debugfs_cq_show_slot_v3_hw(s, slot, debugfs_cq); @@ -3685,8 +3731,12 @@ static void debugfs_dq_show_slot_v3_hw(struct seq_file *s, int slot, static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p) { + struct hisi_sas_debugfs_dq *debugfs_dq = s->private; int slot; + if (!debugfs_dump_is_generated_v3_hw(debugfs_dq->hdr)) + return -EPERM; + for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) debugfs_dq_show_slot_v3_hw(s, slot, s->private); @@ -3700,6 +3750,9 @@ static int debugfs_iost_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_iost *iost = debugfs_iost->iost; int i, max_command_entries = HISI_SAS_MAX_COMMANDS; + if (!debugfs_dump_is_generated_v3_hw(iost)) + return -EPERM; + for (i = 0; i < max_command_entries; i++, iost++) { __le64 *data = &iost->qw0; @@ -3719,6 +3772,9 @@ static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p) int i, tab_idx; __le64 *iost; + if (!debugfs_dump_is_generated_v3_hw(iost_cache)) + return -EPERM; + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) { /* * Data struct of IOST cache: @@ -3742,6 +3798,9 @@ static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p) struct 
hisi_sas_debugfs_itct *debugfs_itct = s->private; struct hisi_sas_itct *itct = debugfs_itct->itct; + if (!debugfs_dump_is_generated_v3_hw(itct)) + return -EPERM; + for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { __le64 *data = &itct->qw0; @@ -3761,6 +3820,9 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p) int i, tab_idx; __le64 *itct; + if (!debugfs_dump_is_generated_v3_hw(itct_cache)) + return -EPERM; + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) { /* * Data struct of ITCT cache: @@ -3778,10 +3840,9 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p) } DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw); -static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) +static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index) { u64 *debugfs_timestamp; - int dump_index = hisi_hba->debugfs_dump_index; struct dentry *dump_dentry; struct dentry *dentry; char name[256]; @@ -3789,17 +3850,17 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) int c; int d; - snprintf(name, 256, "%d", dump_index); + snprintf(name, 256, "%d", index); dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry); - debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index]; + debugfs_timestamp = &hisi_hba->debugfs_timestamp[index]; debugfs_create_u64("timestamp", 0400, dump_dentry, debugfs_timestamp); debugfs_create_file("global", 0400, dump_dentry, - &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL], + &hisi_hba->debugfs_regs[index][DEBUGFS_GLOBAL], &debugfs_global_v3_hw_fops); /* Create port dir and files */ @@ -3808,7 +3869,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) snprintf(name, 256, "%d", p); debugfs_create_file(name, 0400, dentry, - &hisi_hba->debugfs_port_reg[dump_index][p], + &hisi_hba->debugfs_port_reg[index][p], &debugfs_port_v3_hw_fops); } @@ -3818,7 +3879,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) snprintf(name, 256, "%d", c); debugfs_create_file(name, 0400, dentry, - &hisi_hba->debugfs_cq[dump_index][c], + &hisi_hba->debugfs_cq[index][c], &debugfs_cq_v3_hw_fops); } @@ -3828,32 +3889,32 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) snprintf(name, 256, "%d", d); debugfs_create_file(name, 0400, dentry, - &hisi_hba->debugfs_dq[dump_index][d], + &hisi_hba->debugfs_dq[index][d], &debugfs_dq_v3_hw_fops); } debugfs_create_file("iost", 0400, dump_dentry, - &hisi_hba->debugfs_iost[dump_index], + &hisi_hba->debugfs_iost[index], &debugfs_iost_v3_hw_fops); debugfs_create_file("iost_cache", 0400, dump_dentry, - &hisi_hba->debugfs_iost_cache[dump_index], + &hisi_hba->debugfs_iost_cache[index], &debugfs_iost_cache_v3_hw_fops); debugfs_create_file("itct", 0400, dump_dentry, - &hisi_hba->debugfs_itct[dump_index], + &hisi_hba->debugfs_itct[index], &debugfs_itct_v3_hw_fops); debugfs_create_file("itct_cache", 0400, dump_dentry, - &hisi_hba->debugfs_itct_cache[dump_index], + &hisi_hba->debugfs_itct_cache[index], &debugfs_itct_cache_v3_hw_fops); debugfs_create_file("axi", 0400, dump_dentry, - &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI], + &hisi_hba->debugfs_regs[index][DEBUGFS_AXI], &debugfs_axi_v3_hw_fops); debugfs_create_file("ras", 0400, dump_dentry, - &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS], + &hisi_hba->debugfs_regs[index][DEBUGFS_RAS], &debugfs_ras_v3_hw_fops); } @@ -4516,22 +4577,34 @@ static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index) int i; devm_kfree(dev, 
hisi_hba->debugfs_iost_cache[dump_index].cache); + hisi_hba->debugfs_iost_cache[dump_index].cache = NULL; devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache); + hisi_hba->debugfs_itct_cache[dump_index].cache = NULL; devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost); + hisi_hba->debugfs_iost[dump_index].iost = NULL; devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct); + hisi_hba->debugfs_itct[dump_index].itct = NULL; - for (i = 0; i < hisi_hba->queue_count; i++) + for (i = 0; i < hisi_hba->queue_count; i++) { devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr); + hisi_hba->debugfs_dq[dump_index][i].hdr = NULL; + } - for (i = 0; i < hisi_hba->queue_count; i++) + for (i = 0; i < hisi_hba->queue_count; i++) { devm_kfree(dev, hisi_hba->debugfs_cq[dump_index][i].complete_hdr); + hisi_hba->debugfs_cq[dump_index][i].complete_hdr = NULL; + } - for (i = 0; i < DEBUGFS_REGS_NUM; i++) + for (i = 0; i < DEBUGFS_REGS_NUM; i++) { devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data); + hisi_hba->debugfs_regs[dump_index][i].data = NULL; + } - for (i = 0; i < hisi_hba->n_phy; i++) + for (i = 0; i < hisi_hba->n_phy; i++) { devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data); + hisi_hba->debugfs_port_reg[dump_index][i].data = NULL; + } } static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = { @@ -4658,8 +4731,6 @@ static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba) debugfs_snapshot_itct_reg_v3_hw(hisi_hba); debugfs_snapshot_iost_reg_v3_hw(hisi_hba); - debugfs_create_files_v3_hw(hisi_hba); - debugfs_snapshot_restore_v3_hw(hisi_hba); hisi_hba->debugfs_dump_index++; @@ -4743,6 +4814,34 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba) hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS; } +static int debugfs_dump_index_v3_hw_show(struct seq_file *s, void *p) +{ + int *debugfs_dump_index = s->private; + + if (*debugfs_dump_index > 0) + seq_printf(s, "%d\n", *debugfs_dump_index - 1); + else + seq_puts(s, "dump not triggered\n"); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_dump_index_v3_hw); + +static void debugfs_dump_init_v3_hw(struct hisi_hba *hisi_hba) +{ + int i; + + hisi_hba->debugfs_dump_dentry = + debugfs_create_dir("dump", hisi_hba->debugfs_dir); + + debugfs_create_file("latest_dump", 0400, hisi_hba->debugfs_dump_dentry, + &hisi_hba->debugfs_dump_index, + &debugfs_dump_index_v3_hw_fops); + + for (i = 0; i < hisi_sas_debugfs_dump_count; i++) + debugfs_create_files_v3_hw(hisi_hba, i); +} + static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba) { debugfs_remove_recursive(hisi_hba->debugfs_dir); @@ -4755,19 +4854,17 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba) hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), hisi_sas_debugfs_dir); - debugfs_create_file("trigger_dump", 0200, - hisi_hba->debugfs_dir, - hisi_hba, - &debugfs_trigger_dump_v3_hw_fops); - /* create bist structures */ debugfs_bist_init_v3_hw(hisi_hba); - hisi_hba->debugfs_dump_dentry = - debugfs_create_dir("dump", hisi_hba->debugfs_dir); + debugfs_dump_init_v3_hw(hisi_hba); debugfs_phy_down_cnt_init_v3_hw(hisi_hba); debugfs_fifo_init_v3_hw(hisi_hba); + debugfs_create_file("trigger_dump", 0200, + hisi_hba->debugfs_dir, + hisi_hba, + &debugfs_trigger_dump_v3_hw_fops); } static int @@ -4860,16 +4957,13 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) SHOST_DIX_GUARD_CRC); } - if (hisi_sas_debugfs_enable) - debugfs_init_v3_hw(hisi_hba); - rc = interrupt_preinit_v3_hw(hisi_hba); if (rc) 
- goto err_out_undo_debugfs; + goto err_out_free_host; rc = scsi_add_host(shost, dev); if (rc) - goto err_out_undo_debugfs; + goto err_out_free_host; rc = sas_register_ha(sha); if (rc) @@ -4880,6 +4974,8 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_out_unregister_ha; scsi_scan_host(shost); + if (hisi_sas_debugfs_enable) + debugfs_init_v3_hw(hisi_hba); pm_runtime_set_autosuspend_delay(dev, 5000); pm_runtime_use_autosuspend(dev); @@ -4900,9 +4996,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) sas_unregister_ha(sha); err_out_remove_host: scsi_remove_host(shost); -err_out_undo_debugfs: - if (hisi_sas_debugfs_enable) - debugfs_exit_v3_hw(hisi_hba); err_out_free_host: hisi_sas_free(hisi_hba); scsi_host_put(shost); @@ -4934,6 +5027,8 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) struct Scsi_Host *shost = sha->shost; pm_runtime_get_noresume(dev); + if (hisi_sas_debugfs_enable) + debugfs_exit_v3_hw(hisi_hba); sas_unregister_ha(sha); flush_workqueue(hisi_hba->wq); @@ -4941,9 +5036,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) hisi_sas_v3_destroy_irqs(pdev, hisi_hba); hisi_sas_free(hisi_hba); - if (hisi_sas_debugfs_enable) - debugfs_exit_v3_hw(hisi_hba); - scsi_host_put(shost); } @@ -5034,7 +5126,8 @@ static int _suspend_v3_hw(struct device *device) interrupt_disable_v3_hw(hisi_hba); #ifdef CONFIG_PM - if (atomic_read(&device->power.usage_count)) { + if ((device->power.runtime_status == RPM_SUSPENDING) && + atomic_read(&device->power.usage_count)) { dev_err(dev, "PM suspend: host status cannot be suspended\n"); rc = -EBUSY; goto err_out; diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c index fb04b0b515ab..35137f5cfb3a 100644 --- a/drivers/scsi/jazz_esp.c +++ b/drivers/scsi/jazz_esp.c @@ -196,7 +196,7 @@ MODULE_ALIAS("platform:jazz_esp"); static struct platform_driver esp_jazz_driver = { .probe = esp_jazz_probe, - .remove_new = esp_jazz_remove, + .remove = esp_jazz_remove, .driver = { .name = "jazz_esp", }, diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 85059b83ea6b..1c6b024160da 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -398,7 +398,11 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) /* in case no data is transferred */ bsg_reply->reply_payload_rcv_len = 0; - if (ndlp->nlp_flag & NLP_ELS_SND_MASK) + if (test_bit(NLP_PLOGI_SND, &ndlp->nlp_flag) || + test_bit(NLP_PRLI_SND, &ndlp->nlp_flag) || + test_bit(NLP_ADISC_SND, &ndlp->nlp_flag) || + test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) || + test_bit(NLP_RNID_SND, &ndlp->nlp_flag)) return -ENODEV; /* allocate our bsg tracking structure */ diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index d4e46a08f94d..efeb61b15a5b 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -571,7 +571,7 @@ int lpfc_issue_reg_vfi(struct lpfc_vport *); int lpfc_issue_unreg_vfi(struct lpfc_vport *); int lpfc_selective_reset(struct lpfc_hba *); int lpfc_sli4_read_config(struct lpfc_hba *); -void lpfc_sli4_node_prep(struct lpfc_hba *); +void lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba); int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba); int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba); int lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *sglist); @@ -660,6 +660,7 @@ void lpfc_wqe_cmd_template(void); void lpfc_nvmet_cmd_template(void); void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, uint32_t stat, uint32_t param); +void lpfc_nvmels_flush_cmd(struct lpfc_hba *phba); extern int lpfc_enable_nvmet_cnt; extern unsigned long long lpfc_enable_nvmet[]; extern int lpfc_no_hba_reset_cnt; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 134bc96dd134..30891ad17e2a 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -735,7 +735,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0238 Process x%06x NameServer Rsp " - "Data: x%x x%x x%x x%lx x%x\n", Did, + "Data: x%lx x%x x%x x%lx x%x\n", Did, ndlp->nlp_flag, ndlp->nlp_fc4_type, ndlp->nlp_state, vport->fc_flag, vport->fc_rscn_id_cnt); @@ -744,7 +744,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) * state of ndlp hit devloss, change state to * allow rediscovery. */ - if (ndlp->nlp_flag & NLP_NPR_2B_DISC && + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && ndlp->nlp_state == NLP_STE_UNUSED_NODE) { lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -832,12 +832,10 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) if (ndlp->nlp_type != NLP_NVME_INITIATOR || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) continue; - spin_lock_irq(&ndlp->lock); if (ndlp->nlp_DID == Did) - ndlp->nlp_flag &= ~NLP_NVMET_RECOV; + clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); else - ndlp->nlp_flag |= NLP_NVMET_RECOV; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); } } } @@ -894,13 +892,11 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, */ if (vport->phba->nvmet_support) { list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { - if (!(ndlp->nlp_flag & NLP_NVMET_RECOV)) + if (!test_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag)) continue; lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NVMET_RECOV; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); } } @@ -1440,7 +1436,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (ndlp) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0242 Process x%x GFF " - "NameServer Rsp Data: x%x x%lx x%x\n", + "NameServer Rsp Data: x%lx x%lx x%x\n", did, ndlp->nlp_flag, vport->fc_flag, vport->fc_rscn_id_cnt); } else { @@ -2226,6 +2222,11 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ulp_status, ulp_word4, latt); if (latt || ulp_status) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, + "0229 FDMI cmd %04x failed, latt = %d " + "ulp_status: (x%x/x%x), sli_flag x%x\n", + be16_to_cpu(fdmi_cmd), latt, ulp_status, + ulp_word4, phba->sli.sli_flag); /* Look for a retryable error */ if (ulp_status == IOSTAT_LOCAL_REJECT) { @@ -2234,8 +2235,16 @@ 
lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, case IOERR_SLI_DOWN: /* Driver aborted this IO. No retry as error * is likely Offline->Online or some adapter - * error. Recovery will try again. + * error. Recovery will try again, but if the port + * is not active there's no point in continuing + * to issue follow-up FDMI commands. */ + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { + free_ndlp = cmdiocb->ndlp; + lpfc_ct_free_iocb(phba, cmdiocb); + lpfc_nlp_put(free_ndlp); + return; + } break; case IOERR_ABORT_IN_PROGRESS: case IOERR_SEQUENCE_TIMEOUT: @@ -2256,12 +2265,6 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, break; } } - - lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "0229 FDMI cmd %04x latt = %d " - "ulp_status: x%x, rid x%x\n", - be16_to_cpu(fdmi_cmd), latt, ulp_status, - ulp_word4); } free_ndlp = cmdiocb->ndlp; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index a2d2b02b3418..3fd1aa5cc78c 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -870,8 +870,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) wwn_to_u64(ndlp->nlp_nodename.u.wwn)); len += scnprintf(buf+len, size-len, "RPI:x%04x ", ndlp->nlp_rpi); - len += scnprintf(buf+len, size-len, "flag:x%08x ", - ndlp->nlp_flag); + len += scnprintf(buf+len, size-len, "flag:x%08lx ", + ndlp->nlp_flag); if (!ndlp->nlp_type) len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE "); if (ndlp->nlp_type & NLP_FC_NODE) diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index f5ae8cc15820..3e173b5d00e0 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -102,7 +102,7 @@ struct lpfc_nodelist { spinlock_t lock; /* Node management lock */ - uint32_t nlp_flag; /* entry flags */ + unsigned long nlp_flag; /* entry flags */ uint32_t nlp_DID; /* FC D_ID of entry */ uint32_t nlp_last_elscmd; /* Last ELS cmd sent */ uint16_t nlp_type; @@ -182,37 +182,37 @@ struct lpfc_node_rrq { #define lpfc_ndlp_check_qdepth(phba, ndlp) \ (ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri) -/* Defines for nlp_flag (uint32) */ -#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ -#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ -#define NLP_RELEASE_RPI 0x00000004 /* Release RPI to free pool */ -#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */ -#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ -#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ -#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ -#define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */ -#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ -#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ -#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery.
*/ -#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */ -#define NLP_DROPPED 0x00010000 /* Init ref count has been dropped */ -#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ -#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ -#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */ -#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ -#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ -#define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */ -#define NLP_IN_DEV_LOSS 0x00800000 /* devloss in progress */ -#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful +/* nlp_flag mask bits */ +enum lpfc_nlp_flag { + NLP_IGNR_REG_CMPL = 0, /* Rcvd rscn before we cmpl reg login */ + NLP_REG_LOGIN_SEND = 1, /* sent reglogin to adapter */ + NLP_SUPPRESS_RSP = 4, /* Remote NPort supports suppress rsp */ + NLP_PLOGI_SND = 5, /* sent PLOGI request for this entry */ + NLP_PRLI_SND = 6, /* sent PRLI request for this entry */ + NLP_ADISC_SND = 7, /* sent ADISC request for this entry */ + NLP_LOGO_SND = 8, /* sent LOGO request for this entry */ + NLP_RNID_SND = 10, /* sent RNID request for this entry */ + NLP_NVMET_RECOV = 12, /* NVMET auditing node for recovery. */ + NLP_UNREG_INP = 15, /* UNREG_RPI cmd is in progress */ + NLP_DROPPED = 16, /* Init ref count has been dropped */ + NLP_DELAY_TMO = 17, /* delay timeout is running for node */ + NLP_NPR_2B_DISC = 18, /* node is included in num_disc_nodes */ + NLP_RCV_PLOGI = 19, /* Rcv'ed PLOGI from remote system */ + NLP_LOGO_ACC = 20, /* Process LOGO after ACC completes */ + NLP_TGT_NO_SCSIID = 21, /* good PRLI but no binding for scsid */ + NLP_ISSUE_LOGO = 22, /* waiting to issue a LOGO */ + NLP_IN_DEV_LOSS = 23, /* devloss in progress */ + NLP_ACC_REGLOGIN = 24, /* Issue Reg Login after successful ACC */ -#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from + NLP_NPR_ADISC = 25, /* Issue ADISC when dq'ed from NPR list */ -#define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */ -#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ -#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ -#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ -#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ -#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ + NLP_RM_DFLT_RPI = 26, /* need to remove leftover dflt RPI */ + NLP_NODEV_REMOVE = 27, /* Defer removal till discovery ends */ + NLP_TARGET_REMOVE = 28, /* Target remove in process */ + NLP_SC_REQ = 29, /* Target requires authentication */ + NLP_FIRSTBURST = 30, /* Target supports FirstBurst */ + NLP_RPI_REGISTERED = 31 /* nlp_rpi is valid */ +}; /* There are 4 different double linked lists nodelist entries can reside on. 
* The Port Login (PLOGI) list and Address Discovery (ADISC) list are used diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index d737b897ddd8..37f0a930d469 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -725,11 +725,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { if ((np->nlp_state != NLP_STE_NPR_NODE) || - !(np->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &np->nlp_flag)) continue; - spin_lock_irq(&np->lock); - np->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&np->lock); + clear_bit(NLP_NPR_ADISC, &np->nlp_flag); lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); @@ -864,9 +862,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, sizeof(struct lpfc_name)); /* Set state will put ndlp onto node list if not already done */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) @@ -1018,7 +1014,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * registered with the SCSI transport, remove the initial * reference to trigger node release. */ - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) && + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) lpfc_nlp_put(ndlp); @@ -1236,9 +1232,9 @@ lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "6445 ELS completes after LINK_DOWN: " - " Status %x/%x cmd x%x flg x%x\n", + " Status %x/%x cmd x%x flg x%x iotag x%x\n", ulp_status, ulp_word4, cmd, - cmdiocb->cmd_flag); + cmdiocb->cmd_flag, cmdiocb->iotag); if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) { cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; @@ -1548,7 +1544,7 @@ lpfc_initial_flogi(struct lpfc_vport *vport) * Otherwise, decrement node reference to trigger release. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && - !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); return 0; } @@ -1597,7 +1593,7 @@ lpfc_initial_fdisc(struct lpfc_vport *vport) * Otherwise, decrement node reference to trigger release. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && - !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); return 0; } @@ -1675,9 +1671,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, struct lpfc_nodelist *new_ndlp; struct serv_parm *sp; uint8_t name[sizeof(struct lpfc_name)]; - uint32_t keepDID = 0, keep_nlp_flag = 0; + uint32_t keepDID = 0; int rc; - uint32_t keep_new_nlp_flag = 0; + unsigned long keep_nlp_flag = 0, keep_new_nlp_flag = 0; uint16_t keep_nlp_state; u32 keep_nlp_fc4_type = 0; struct lpfc_nvme_rport *keep_nrport = NULL; @@ -1704,8 +1700,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, } lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, - "3178 PLOGI confirm: ndlp x%x x%x x%x: " - "new_ndlp x%x x%x x%x\n", + "3178 PLOGI confirm: ndlp x%x x%lx x%x: " + "new_ndlp x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, (new_ndlp ? new_ndlp->nlp_DID : 0), (new_ndlp ? 
new_ndlp->nlp_flag : 0), @@ -1769,48 +1765,48 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, new_ndlp->nlp_flag = ndlp->nlp_flag; /* if new_ndlp had NLP_UNREG_INP set, keep it */ - if (keep_new_nlp_flag & NLP_UNREG_INP) - new_ndlp->nlp_flag |= NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &keep_new_nlp_flag)) + set_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag); /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ - if (keep_new_nlp_flag & NLP_RPI_REGISTERED) - new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; + if (test_bit(NLP_RPI_REGISTERED, &keep_new_nlp_flag)) + set_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; + clear_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag); /* * Retain the DROPPED flag. This will take care of the init * refcount when affecting the state change */ - if (keep_new_nlp_flag & NLP_DROPPED) - new_ndlp->nlp_flag |= NLP_DROPPED; + if (test_bit(NLP_DROPPED, &keep_new_nlp_flag)) + set_bit(NLP_DROPPED, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &new_ndlp->nlp_flag); ndlp->nlp_flag = keep_new_nlp_flag; /* if ndlp had NLP_UNREG_INP set, keep it */ - if (keep_nlp_flag & NLP_UNREG_INP) - ndlp->nlp_flag |= NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &keep_nlp_flag)) + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); /* if ndlp had NLP_RPI_REGISTERED set, keep it */ - if (keep_nlp_flag & NLP_RPI_REGISTERED) - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + if (test_bit(NLP_RPI_REGISTERED, &keep_nlp_flag)) + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; + clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); /* * Retain the DROPPED flag. This will take care of the init * refcount when affecting the state change */ - if (keep_nlp_flag & NLP_DROPPED) - ndlp->nlp_flag |= NLP_DROPPED; + if (test_bit(NLP_DROPPED, &keep_nlp_flag)) + set_bit(NLP_DROPPED, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &ndlp->nlp_flag); spin_unlock_irq(&new_ndlp->lock); spin_unlock_irq(&ndlp->lock); @@ -1888,7 +1884,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, phba->active_rrq_pool); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, - "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", + "3173 PLOGI confirm exit: new_ndlp x%x x%lx x%x\n", new_ndlp->nlp_DID, new_ndlp->nlp_flag, new_ndlp->nlp_fc4_type); @@ -2009,7 +2005,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, IOCB_t *irsp; struct lpfc_nodelist *ndlp, *free_ndlp; struct lpfc_dmabuf *prsp; - int disc; + bool disc; struct serv_parm *sp = NULL; u32 ulp_status, ulp_word4, did, iotag; bool release_node = false; @@ -2044,10 +2040,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. 
*/ - spin_lock_irq(&ndlp->lock); - disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* PLOGI completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, @@ -2060,9 +2053,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } @@ -2070,11 +2061,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ - if (disc) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); - } + if (disc) + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } /* Warn PLOGI status Don't print the vport to vport rjts */ @@ -2097,7 +2085,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * with the reglogin process. */ spin_lock_irq(&ndlp->lock); - if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && + if ((test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag) || + test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)) && ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { spin_unlock_irq(&ndlp->lock); goto out; @@ -2108,8 +2097,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * start the device remove process. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2212,12 +2201,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) * outstanding UNREG_RPI mbox command completes, unless we * are going offline. This logic does not apply for Fabric DIDs */ - if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) && + if ((test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) || + test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) && ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && !test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4110 Issue PLOGI x%x deferred " - "on NPort x%x rpi x%x flg x%x Data:" + "on NPort x%x rpi x%x flg x%lx Data:" " x%px\n", ndlp->nlp_defer_did, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, ndlp); @@ -2335,10 +2325,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_PRLI_SND; + clear_bit(NLP_PRLI_SND, &ndlp->nlp_flag); /* Driver supports multiple FC4 types. Counters matter. 
*/ + spin_lock_irq(&ndlp->lock); vport->fc_prli_sent--; ndlp->fc4_prli_sent--; spin_unlock_irq(&ndlp->lock); @@ -2379,7 +2369,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Warn PRLI status */ lpfc_printf_vlog(vport, mode, LOG_ELS, "2754 PRLI DID:%06X Status:x%x/x%x, " - "data: x%x x%x x%x\n", + "data: x%x x%x x%lx\n", ndlp->nlp_DID, ulp_status, ulp_word4, ndlp->nlp_state, ndlp->fc4_prli_sent, ndlp->nlp_flag); @@ -2396,10 +2386,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) || (ndlp->nlp_state == NLP_STE_NPR_NODE && - ndlp->nlp_flag & NLP_DELAY_TMO)) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, + test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag))) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "2784 PRLI cmpl: Allow Node recovery " - "DID x%06x nstate x%x nflag x%x\n", + "DID x%06x nstate x%x nflag x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag); goto out; @@ -2420,8 +2410,8 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, spin_lock_irq(&ndlp->lock); if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && !ndlp->fc4_prli_sent) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2496,7 +2486,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); ndlp->nvme_fb_size = 0; send_next_prli: @@ -2627,8 +2618,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * the ndlp is used to track outstanding PRLIs for different * FC4 types. */ + set_bit(NLP_PRLI_SND, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_PRLI_SND; vport->fc_prli_sent++; ndlp->fc4_prli_sent++; spin_unlock_irq(&ndlp->lock); @@ -2789,7 +2780,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_vport *vport = cmdiocb->vport; IOCB_t *irsp; struct lpfc_nodelist *ndlp; - int disc; + bool disc; u32 ulp_status, ulp_word4, tmo, iotag; bool release_node = false; @@ -2818,10 +2809,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. 
*/ - spin_lock_irq(&ndlp->lock); - disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); - ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag); /* ADISC completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0104 ADISC completes to NPort x%x " @@ -2832,9 +2821,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } @@ -2843,9 +2830,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ if (disc) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_set_disctmo(vport); } goto out; @@ -2864,8 +2849,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, */ spin_lock_irq(&ndlp->lock); if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2938,9 +2923,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->fc_stat.elsXmitADISC++; elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_ADISC_SND; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_ADISC_SND, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -2961,9 +2944,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_ADISC_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag); return 1; } @@ -2985,7 +2966,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp = cmdiocb->ndlp; struct lpfc_vport *vport = ndlp->vport; IOCB_t *irsp; - unsigned long flags; uint32_t skip_recovery = 0; int wake_up_waiter = 0; u32 ulp_status; @@ -3007,8 +2987,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, iotag = irsp->ulpIoTag; } + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { wake_up_waiter = 1; ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; @@ -3023,7 +3003,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* LOGO completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0105 LOGO completes to NPort x%x " - "IoTag x%x refcnt %d nflags x%x xflags x%x " + "IoTag x%x refcnt %d nflags x%lx xflags x%x " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, iotag, kref_read(&ndlp->kref), ndlp->nlp_flag, @@ -3061,12 +3041,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* The driver sets this flag for an NPIV instance that doesn't want to * log into the remote port. 
*/ - if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { - spin_lock_irq(&ndlp->lock); - if (phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_flag |= NLP_RELEASE_RPI; - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag)) { + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); goto out_rsrc_free; @@ -3089,9 +3065,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && skip_recovery == 0) { lpfc_cancel_retry_delay_tmo(vport, ndlp); - spin_lock_irqsave(&ndlp->lock, flags); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irqrestore(&ndlp->lock, flags); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3187 LOGO completes to NPort x%x: Start " @@ -3113,9 +3087,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * register with the transport. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); } @@ -3156,12 +3128,8 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint16_t cmdsize; int rc; - spin_lock_irq(&ndlp->lock); - if (ndlp->nlp_flag & NLP_LOGO_SND) { - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_LOGO_SND, &ndlp->nlp_flag)) return 0; - } - spin_unlock_irq(&ndlp->lock); cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, @@ -3180,10 +3148,8 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->fc_stat.elsXmitLOGO++; elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_SND; - ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_SND, &ndlp->nlp_flag); + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -3208,9 +3174,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); return 1; } @@ -3286,13 +3250,13 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, static int lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) { - int rc = 0; + int rc; struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ns_ndlp; LPFC_MBOXQ_t *mbox; - if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) - return rc; + if (test_bit(NLP_RPI_REGISTERED, &fc_ndlp->nlp_flag)) + return 0; ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); if (!ns_ndlp) @@ -3309,7 +3273,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0936 %s: no memory for reg_login " - "Data: x%x x%x x%x x%x\n", __func__, + "Data: x%x x%x x%lx x%x\n", __func__, fc_ndlp->nlp_DID, fc_ndlp->nlp_state, fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); return -ENOMEM; @@ -3321,7 +3285,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) goto out; } - fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &fc_ndlp->nlp_flag); mbox->mbox_cmpl = 
lpfc_mbx_cmpl_fc_reg_login; mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); if (!mbox->ctx_ndlp) { @@ -3345,7 +3309,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0938 %s: failed to format reg_login " - "Data: x%x x%x x%x x%x\n", __func__, + "Data: x%x x%x x%lx x%x\n", __func__, fc_ndlp->nlp_DID, fc_ndlp->nlp_state, fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); return rc; @@ -4384,11 +4348,8 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) { struct lpfc_work_evt *evtp; - if (!(nlp->nlp_flag & NLP_DELAY_TMO)) + if (!test_and_clear_bit(NLP_DELAY_TMO, &nlp->nlp_flag)) return; - spin_lock_irq(&nlp->lock); - nlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&nlp->lock); del_timer_sync(&nlp->nlp_delayfunc); nlp->nlp_last_elscmd = 0; if (!list_empty(&nlp->els_retry_evt.evt_listp)) { @@ -4397,10 +4358,7 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) evtp = &nlp->els_retry_evt; lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); } - if (nlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&nlp->lock); - nlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&nlp->lock); + if (test_and_clear_bit(NLP_NPR_2B_DISC, &nlp->nlp_flag)) { if (vport->num_disc_nodes) { if (vport->port_state < LPFC_VPORT_READY) { /* Check if there are more ADISCs to be sent */ @@ -4480,14 +4438,11 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) spin_lock_irq(&ndlp->lock); cmd = ndlp->nlp_last_elscmd; ndlp->nlp_last_elscmd = 0; + spin_unlock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { - spin_unlock_irq(&ndlp->lock); + if (!test_and_clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) return; - } - ndlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); /* * If a discovery event readded nlp_delayfunc after timer * firing and before processing the timer, cancel the @@ -5010,9 +4965,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* delay is specified in milliseconds */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(delay)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_prev_state = ndlp->nlp_state; if ((cmd == ELS_CMD_PRLI) || @@ -5072,7 +5025,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0108 No retry ELS command x%x to remote " "NPORT x%x Retried:%d Error:x%x/%x " - "IoTag x%x nflags x%x\n", + "IoTag x%x nflags x%lx\n", cmd, did, cmdiocb->retry, ulp_status, ulp_word4, cmdiocb->iotag, (ndlp ? ndlp->nlp_flag : 0)); @@ -5239,7 +5192,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ACC to LOGO completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0109 ACC to LOGO completes to NPort x%x refcnt %d " - "last els x%x Data: x%x x%x x%x\n", + "last els x%x Data: x%lx x%x x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -5254,16 +5207,14 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; if (ndlp->nlp_state == NLP_STE_NPR_NODE) { - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) lpfc_unreg_rpi(vport, ndlp); /* If came from PRLO, then PRLO_ACC is done. * Start rediscovery now. 
*/ if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -5300,7 +5251,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (ndlp) { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "0006 rpi x%x DID:%x flg:%x %d x%px " + "0006 rpi x%x DID:%x flg:%lx %d x%px " "mbx_cmd x%x mbx_flag x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp, mbx_cmd, @@ -5311,11 +5262,9 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * first on an UNREG_LOGIN and then release the final * references. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); if (mbx_cmd == MBX_UNREG_LOGIN) - ndlp->nlp_flag &= ~NLP_UNREG_INP; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); lpfc_drop_node(ndlp->vport, ndlp); } @@ -5381,23 +5330,23 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ELS response tag completes */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0110 ELS response tag x%x completes " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", + "Data: x%x x%x x%x x%x x%lx x%x x%x x%x %p %p\n", iotag, ulp_status, ulp_word4, tmo, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); if (mbox) { - if (ulp_status == 0 - && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { + if (ulp_status == 0 && + test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) { if (!lpfc_unreg_rpi(vport, ndlp) && !test_bit(FC_PT2PT, &vport->fc_flag)) { - if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || + if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0314 PLOGI recov " "DID x%x " - "Data: x%x x%x x%x\n", + "Data: x%x x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_rpi, @@ -5414,18 +5363,17 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out_free_mbox; mbox->vport = vport; - if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { + if (test_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag)) { mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; - } - else { + } else { mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); } - ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) != MBX_NOT_FINISHED) goto out; @@ -5434,12 +5382,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * set for this failed mailbox command. 
*/ lpfc_nlp_put(ndlp); - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); /* ELS rsp: Cannot issue reg_login for */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0138 ELS rsp: Cannot issue reg_login for x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } @@ -5448,32 +5396,20 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } out: if (ndlp && shost) { - spin_lock_irq(&ndlp->lock); if (mbox) - ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; - ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + clear_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag); } /* An SLI4 NPIV instance wants to drop the node at this point under - * these conditions and release the RPI. + * these conditions because it doesn't need the login. */ if (phba->sli_rev == LPFC_SLI_REV4 && vport && vport->port_type == LPFC_NPIV_PORT && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && - ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { - lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - spin_unlock_irq(&ndlp->lock); - } - lpfc_drop_node(vport, ndlp); - } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && - ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && - ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { + if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && + ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { /* Drop ndlp if there is no planned or outstanding * issued PRLI. * @@ -5540,9 +5476,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 1; } @@ -5570,7 +5504,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, pcmd += sizeof(uint32_t); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC: did:x%x flg:x%x", + "Issue ACC: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_FLOGI: @@ -5649,7 +5583,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", + "Issue ACC FLOGI/PLOGI: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_PRLO: @@ -5687,7 +5621,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC PRLO: did:x%x flg:x%x", + "Issue ACC PRLO: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_RDF: @@ -5732,12 +5666,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, default: return 1; } - if (ndlp->nlp_flag & NLP_LOGO_ACC) { - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || - ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) - ndlp->nlp_flag &= ~NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) { + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) && + !test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; } 
else { elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; @@ -5760,7 +5692,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, /* Xmit ELS ACC response tag */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " - "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x " "RPI: x%x, fc_flag x%lx refcnt %d\n", rc, elsiocb->iotag, elsiocb->sli4_xritag, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -5835,13 +5767,13 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, /* Xmit ELS RJT response tag */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0129 Xmit ELS RJT x%x response tag x%x " - "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, " "rpi x%x\n", rejectError, elsiocb->iotag, get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue LS_RJT: did:x%x flg:x%x err:x%x", + "Issue LS_RJT: did:x%x flg:x%lx err:x%x", ndlp->nlp_DID, ndlp->nlp_flag, rejectError); phba->fc_stat.elsXmitLSRJT++; @@ -5852,18 +5784,6 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, return 1; } - /* The NPIV instance is rejecting this unsolicited ELS. Make sure the - * node's assigned RPI gets released provided this node is not already - * registered with the transport. - */ - if (phba->sli_rev == LPFC_SLI_REV4 && - vport->port_type == LPFC_NPIV_PORT && - !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_RELEASE_RPI; - spin_unlock_irq(&ndlp->lock); - } - rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); @@ -5944,7 +5864,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, lpfc_format_edc_lft_desc(phba, tlv); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue EDC ACC: did:x%x flg:x%x refcnt %d", + "Issue EDC ACC: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; @@ -5966,7 +5886,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* Xmit ELS ACC response tag */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " - "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x " "RPI: x%x, fc_flag x%lx\n", rc, elsiocb->iotag, elsiocb->sli4_xritag, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -6035,7 +5955,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, /* Xmit ADISC ACC response tag */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0130 Xmit ADISC ACC response iotag x%x xri: " - "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", + "x%x, did x%x, nlp_flag x%lx, nlp_state x%x rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -6051,7 +5971,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, ap->DID = be32_to_cpu(vport->fc_myDID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", + "Issue ACC ADISC: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6157,7 +6077,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, /* Xmit PRLI ACC 
response tag */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0131 Xmit PRLI ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -6228,7 +6148,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6015 NVME issue PRLI ACC word1 x%08x " - "word4 x%08x word5 x%08x flag x%x, " + "word4 x%08x word5 x%08x flag x%lx, " "fcp_info x%x nlp_type x%x\n", npr_nvme->word1, npr_nvme->word4, npr_nvme->word5, ndlp->nlp_flag, @@ -6243,7 +6163,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, ndlp->nlp_DID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC PRLI: did:x%x flg:x%x", + "Issue ACC PRLI: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6357,7 +6277,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC RNID: did:x%x flg:x%x refcnt %d", + "Issue ACC RNID: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6414,7 +6334,7 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport, get_job_ulpcontext(phba, iocb)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", + "Clear RRQ: did:x%x flg:x%lx exchg:x%.08x", ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) xri = bf_get(rrq_oxid, rrq); @@ -6491,7 +6411,7 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", + "Issue ACC ECHO: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6541,14 +6461,12 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport) list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state != NLP_STE_NPR_NODE || - !(ndlp->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) continue; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); - if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + if (!test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* This node was marked for ADISC but was not picked * for discovery. This is possible if the node was * missing in gidft response. 
@@ -6606,9 +6524,9 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) /* go thru NPR nodes and issue any remaining ELS PLOGIs */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_NPR_NODE && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && - (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && - (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { + test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && + !test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag) && + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -7104,7 +7022,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2171 Xmit RDP response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -8078,7 +7996,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, */ if (vport->port_state <= LPFC_NS_QRY) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", + "RCV RSCN ignore: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); @@ -8108,7 +8026,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, vport->fc_flag, payload_len, *lp, vport->fc_rscn_id_cnt); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", + "RCV RSCN vport: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); @@ -8145,7 +8063,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, if (test_bit(FC_RSCN_MODE, &vport->fc_flag) || test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", + "RCV RSCN defer: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); set_bit(FC_RSCN_DEFERRED, &vport->fc_flag); @@ -8201,7 +8119,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, return 0; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN: did:x%x/ste:x%x flg:x%x", + "RCV RSCN: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); set_bit(FC_RSCN_MODE, &vport->fc_flag); @@ -8707,7 +8625,7 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Xmit ELS RLS ACC response tag */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -8869,7 +8787,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* Xmit ELS RLS ACC response tag */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x, " "Data: x%x x%x x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -9066,7 +8984,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, /* Xmit ELS RPL ACC response tag */ lpfc_printf_vlog(vport, KERN_INFO, 
LOG_ELS, "0120 Xmit ELS RPL ACC response tag x%x " - "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, " "rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -9642,14 +9560,24 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags); /* First we need to issue aborts to outstanding cmds on txcmpl */ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { - if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err) - continue; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2243 iotag = 0x%x cmd_flag = 0x%x " + "ulp_command = 0x%x this_vport %x " + "sli_flag = 0x%x\n", + piocb->iotag, piocb->cmd_flag, + get_job_cmnd(phba, piocb), + (piocb->vport == vport), + phba->sli.sli_flag); if (piocb->vport != vport) continue; - if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err) - continue; + if ((phba->sli.sli_flag & LPFC_SLI_ACTIVE) && !mbx_tmo_err) { + if (piocb->cmd_flag & LPFC_IO_LIBDFC) + continue; + if (piocb->cmd_flag & LPFC_DRIVER_ABORTED) + continue; + } /* On the ELS ring we can have ELS_REQUESTs, ELS_RSPs, * or GEN_REQUESTs waiting for a CQE response. @@ -10411,14 +10339,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * Do not process any unsolicited ELS commands * if the ndlp is in DEV_LOSS */ - spin_lock_irq(&ndlp->lock); - if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) { if (newnode) lpfc_nlp_put(ndlp); goto dropit; } - spin_unlock_irq(&ndlp->lock); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) @@ -10447,7 +10372,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, switch (cmd) { case ELS_CMD_PLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PLOGI: did:x%x/ste:x%x flg:x%x", + "RCV PLOGI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPLOGI++; @@ -10486,9 +10411,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); @@ -10496,7 +10419,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FLOGI: did:x%x/ste:x%x flg:x%x", + "RCV FLOGI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFLOGI++; @@ -10523,7 +10446,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_LOGO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV LOGO: did:x%x/ste:x%x flg:x%x", + "RCV LOGO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvLOGO++; @@ -10540,7 +10463,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_PRLO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PRLO: did:x%x/ste:x%x flg:x%x", + "RCV PRLO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPRLO++; @@ -10569,7 +10492,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_ADISC: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV ADISC: did:x%x/ste:x%x flg:x%x", + "RCV 
ADISC: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); lpfc_send_els_event(vport, ndlp, payload); @@ -10584,7 +10507,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_PDISC: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PDISC: did:x%x/ste:x%x flg:x%x", + "RCV PDISC: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPDISC++; @@ -10598,7 +10521,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FARPR: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FARPR: did:x%x/ste:x%x flg:x%x", + "RCV FARPR: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFARPR++; @@ -10606,7 +10529,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FARP: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FARP: did:x%x/ste:x%x flg:x%x", + "RCV FARP: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFARP++; @@ -10614,7 +10537,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FAN: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FAN: did:x%x/ste:x%x flg:x%x", + "RCV FAN: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFAN++; @@ -10623,7 +10546,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, case ELS_CMD_PRLI: case ELS_CMD_NVMEPRLI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PRLI: did:x%x/ste:x%x flg:x%x", + "RCV PRLI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPRLI++; @@ -10637,7 +10560,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_LIRR: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV LIRR: did:x%x/ste:x%x flg:x%x", + "RCV LIRR: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvLIRR++; @@ -10648,7 +10571,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RLS: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RLS: did:x%x/ste:x%x flg:x%x", + "RCV RLS: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRLS++; @@ -10659,7 +10582,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RPL: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RPL: did:x%x/ste:x%x flg:x%x", + "RCV RPL: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRPL++; @@ -10670,7 +10593,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RNID: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RNID: did:x%x/ste:x%x flg:x%x", + "RCV RNID: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRNID++; @@ -10681,7 +10604,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RTV: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RTV: did:x%x/ste:x%x flg:x%x", + "RCV RTV: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRTV++; lpfc_els_rcv_rtv(vport, elsiocb, ndlp); @@ -10691,7 +10614,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RRQ: 
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RRQ: did:x%x/ste:x%x flg:x%x", + "RCV RRQ: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRRQ++; @@ -10702,7 +10625,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_ECHO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV ECHO: did:x%x/ste:x%x flg:x%x", + "RCV ECHO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvECHO++; @@ -10718,7 +10641,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FPIN: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FPIN: did:x%x/ste:x%x flg:x%x", + "RCV FPIN: did:x%x/ste:x%x " + "flg:x%lx", did, vport->port_state, ndlp->nlp_flag); lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, @@ -11226,9 +11150,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) return; mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; phba->pport->port_state = LPFC_FLOGI; return; @@ -11359,11 +11281,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { if ((np->nlp_state != NLP_STE_NPR_NODE) || - !(np->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &np->nlp_flag)) continue; - spin_lock_irq(&ndlp->lock); - np->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &np->nlp_flag); lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); @@ -11566,7 +11486,7 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* NPIV LOGO completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2928 NPIV LOGO completes to NPort x%x " - "Data: x%x x%x x%x x%x x%x x%x x%x\n", + "Data: x%x x%x x%x x%x x%x x%lx x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4, tmo, vport->num_disc_nodes, kref_read(&ndlp->kref), ndlp->nlp_flag, @@ -11582,8 +11502,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Wake up lpfc_vport_delete if waiting...*/ if (ndlp->logo_waitq) wake_up(ndlp->logo_waitq); + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; spin_unlock_irq(&ndlp->lock); } @@ -11633,13 +11554,11 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, - "Issue LOGO npiv did:x%x flg:x%x", + "Issue LOGO npiv did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_SND, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -11655,9 +11574,7 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); return 1; } @@ -12138,7 +12055,7 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3094 Start rport recovery on shost id 0x%x " "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " - "flags 0x%x\n", + "flag 0x%lx\n", shost->host_no, ndlp->nlp_DID, vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag); @@ -12148,8 +12065,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport, */ spin_lock_irqsave(&ndlp->lock, flags); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag |= NLP_ISSUE_LOGO; spin_unlock_irqrestore(&ndlp->lock, flags); + set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 9241075f72fa..4036a9838bb5 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -100,6 +100,12 @@ lpfc_rport_invalid(struct fc_rport *rport) return -EINVAL; } + if (rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) { + pr_info("**** %s: devloss_callbk_done rport x%px SID x%x\n", + __func__, rport, rport->scsi_target_id); + return -EINVAL; + } + rdata = rport->dd_data; if (!rdata) { pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n", @@ -137,7 +143,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport) ndlp = rdata->pnode; vport = ndlp->vport; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport terminate: sid:x%x did:x%x flg:x%x", + "rport terminate: sid:x%x did:x%x flg:x%lx", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); if (ndlp->nlp_sid != NLP_NO_SID) @@ -155,6 +161,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) struct lpfc_hba *phba; struct lpfc_work_evt *evtp; unsigned long iflags; + bool nvme_reg = false; ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode; if (!ndlp) @@ -164,11 +171,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) phba = vport->phba; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport devlosscb: sid:x%x did:x%x flg:x%x", + "rport devlosscb: sid:x%x did:x%x flg:x%lx", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "3181 dev_loss_callbk x%06x, rport x%px flg x%x " + "3181 dev_loss_callbk x%06x, rport x%px flg x%lx " "load_flag x%lx refcnt %u state %d xpt x%x\n", ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, vport->load_flag, kref_read(&ndlp->kref), @@ -177,38 +184,44 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) /* Don't schedule a worker thread event if the vport is going down. */ if (test_bit(FC_UNLOADING, &vport->load_flag) || !test_bit(HBA_SETUP, &phba->hba_flag)) { + spin_lock_irqsave(&ndlp->lock, iflags); ndlp->rport = NULL; + if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) + nvme_reg = true; + /* The scsi_transport is done with the rport so lpfc cannot - * call to unregister. Remove the scsi transport reference - * and clean up the SCSI transport node details. + * call to unregister. */ - if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) { + if (ndlp->fc4_xpt_flags & SCSI_XPT_REGD) { ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD; - /* NVME transport-registered rports need the - * NLP_XPT_REGD flag to complete an unregister. + /* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node, + * unregister calls were made to the scsi and nvme + * transports and refcnt was already decremented. Clear + * the NLP_XPT_REGD flag only if the NVME Rport is + * confirmed unregistered. 
*/ - if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) + if (!nvme_reg && ndlp->fc4_xpt_flags & NLP_XPT_REGD) { ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; + spin_unlock_irqrestore(&ndlp->lock, iflags); + lpfc_nlp_put(ndlp); /* may free ndlp */ + } else { + spin_unlock_irqrestore(&ndlp->lock, iflags); + } + } else { spin_unlock_irqrestore(&ndlp->lock, iflags); - lpfc_nlp_put(ndlp); - spin_lock_irqsave(&ndlp->lock, iflags); } /* Only 1 thread can drop the initial node reference. If * another thread has set NLP_DROPPED, this thread is done. */ - if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) && - !(ndlp->nlp_flag & NLP_DROPPED)) { - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irqrestore(&ndlp->lock, iflags); - lpfc_nlp_put(ndlp); + if (nvme_reg || test_bit(NLP_DROPPED, &ndlp->nlp_flag)) return; - } - spin_unlock_irqrestore(&ndlp->lock, iflags); + set_bit(NLP_DROPPED, &ndlp->nlp_flag); + lpfc_nlp_put(ndlp); return; } @@ -235,14 +248,14 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) return; } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_IN_DEV_LOSS; + set_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); + spin_lock_irqsave(&ndlp->lock, iflags); /* If there is a PLOGI in progress, and we are in a * NLP_NPR_2B_DISC state, don't turn off the flag. */ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* * The backend does not expect any more calls associated with this @@ -271,15 +284,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) } else { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "3188 worker thread is stopped %s x%06x, " - " rport x%px flg x%x load_flag x%lx refcnt " + " rport x%px flg x%lx load_flag x%lx refcnt " "%d\n", __func__, ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, vport->load_flag, kref_read(&ndlp->kref)); if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) { - spin_lock_irqsave(&ndlp->lock, iflags); /* Node is in dev loss. No further transaction. 
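Also visible above: set_bit(NLP_IN_DEV_LOSS, ...) is issued before ndlp->lock is taken, so the lock now covers only the state that still needs it (the nlp_state check). Because each bitop is atomic on its own, a single-flag update no longer has to sit inside the critical section. A sketch of that shape, with placeholder names:

    #include <linux/bitops.h>
    #include <linux/spinlock.h>

    enum { DEMO_IN_DEV_LOSS, DEMO_NPR_2B_DISC };
    enum { DEMO_STE_PLOGI_ISSUE = 3 };

    struct demo_node {
            spinlock_t lock;
            unsigned long flags;
            int state;              /* still guarded by the lock */
    };

    static void demo_mark_devloss(struct demo_node *n)
    {
            unsigned long iflags;

            /* Atomic by itself; no lock needed for one bit. */
            set_bit(DEMO_IN_DEV_LOSS, &n->flags);

            spin_lock_irqsave(&n->lock, iflags);
            if (n->state != DEMO_STE_PLOGI_ISSUE)
                    clear_bit(DEMO_NPR_2B_DISC, &n->flags);
            spin_unlock_irqrestore(&n->lock, iflags);
    }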
*/ - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } @@ -412,7 +423,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport, lpfc_nlp_get(ndlp); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8438 Devloss timeout reversed on DID x%x " - "refcnt %d ndlp %p flag x%x " + "refcnt %d ndlp %p flag x%lx " "port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); @@ -455,7 +466,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n", + "3182 %s x%06x, nflag x%lx xflags x%x refcnt %d\n", __func__, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags, kref_read(&ndlp->kref)); @@ -469,9 +480,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID); - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); return fcf_inuse; } @@ -499,7 +508,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) } break; case Fabric_Cntl_DID: - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) recovering = true; break; case FDMI_DID: @@ -527,15 +536,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) * the following lpfc_nlp_put is necessary after fabric node is * recovered. */ - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); if (recovering) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8436 Devloss timeout marked on " "DID x%x refcnt %d ndlp %p " - "flag x%x port_state = x%x\n", + "flag x%lx port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); @@ -552,7 +559,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) LOG_DISCOVERY | LOG_NODE, "8437 Devloss timeout ignored on " "DID x%x refcnt %d ndlp %p " - "flag x%x port_state = x%x\n", + "flag x%lx port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); @@ -572,7 +579,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0203 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " - "NPort x%06x Data: x%x x%x x%x refcnt %d\n", + "NPort x%06x Data: x%lx x%x x%x refcnt %d\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, @@ -582,15 +589,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT, "0204 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " - "NPort x%06x Data: x%x x%x x%x\n", + "NPort x%06x Data: x%lx x%x x%x\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); /* If we are devloss, but we are in the process of rediscovering the * ndlp, don't issue a NLP_EVT_DEVICE_RM 
event. @@ -1355,7 +1360,7 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) if (ndlp->nlp_DID != Fabric_DID) lpfc_unreg_rpi(vport, ndlp); lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { /* Fail outstanding IO now since device is * marked for PLOGI. */ @@ -3864,14 +3869,13 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->ctx_ndlp = NULL; lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY, - "0002 rpi:%x DID:%x flg:%x %d x%px\n", + "0002 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); - if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || + if (test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) || ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { /* We rcvd a rscn after issuing this * mbox reg login, we may have cycled @@ -3881,16 +3885,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * there is another reg login in * process. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); /* * We cannot leave the RPI registered because * if we go thru discovery again for this ndlp * a subsequent REG_RPI will fail. */ - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); } @@ -4203,7 +4205,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -4334,9 +4336,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * reference. 
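In lpfc_mbx_cmpl_reg_login above, the old guarded clear (test the flag, then mask it off) becomes one unconditional clear_bit(): clearing a bit that is already clear is harmless, so the pre-test adds nothing once the field is an atomic bitmap. The simplification in isolation:

    #include <linux/bitops.h>

    enum { DEMO_REG_LOGIN_SEND };

    static void demo_reg_login_done(unsigned long *flags)
    {
            /* Old shape:
             *      if (*flags & BIT(DEMO_REG_LOGIN_SEND))
             *              *flags &= ~BIT(DEMO_REG_LOGIN_SEND);
             * clear_bit() is idempotent, so the test is redundant.
             */
            clear_bit(DEMO_REG_LOGIN_SEND, flags);
    }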
*/ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } @@ -4357,11 +4357,11 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0003 rpi:%x DID:%x flg:%x %d x%px\n", + "0003 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); @@ -4453,8 +4453,8 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) __func__, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_state); - ndlp->nlp_flag |= NLP_RPI_REGISTERED; - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -4488,7 +4488,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport add: did:x%x flg:x%x type x%x", + "rport add: did:x%x flg:x%lx type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); /* Don't add the remote port if unloading. */ @@ -4556,7 +4556,7 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) return; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport delete: did:x%x flg:x%x type x%x", + "rport delete: did:x%x flg:x%lx type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, @@ -4672,7 +4672,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, "0999 %s Not regd: ndlp x%px rport x%px DID " - "x%x FLG x%x XPT x%x\n", + "x%x FLG x%lx XPT x%x\n", __func__, ndlp, ndlp->rport, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags); return; @@ -4688,7 +4688,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } else if (!ndlp->rport) { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, - "1999 %s NDLP in devloss x%px DID x%x FLG x%x" + "1999 %s NDLP in devloss x%px DID x%x FLG x%lx" " XPT x%x refcnt %u\n", __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags, @@ -4733,7 +4733,7 @@ lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type |= NLP_FC_NODE; fallthrough; case NLP_STE_MAPPED_NODE: - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); lpfc_nlp_reg_node(vport, ndlp); break; @@ -4744,7 +4744,7 @@ lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * backend, attempt it now */ case NLP_STE_NPR_NODE: - ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); fallthrough; default: lpfc_nlp_unreg_node(vport, ndlp); @@ -4765,13 +4765,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } if (new_state == NLP_STE_UNMAPPED_NODE) { - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FC_NODE; } if (new_state == NLP_STE_MAPPED_NODE) - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + 
clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); if (new_state == NLP_STE_NPR_NODE) - ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); /* Reg/Unreg for FCP and NVME Transport interface */ if ((old_state == NLP_STE_MAPPED_NODE || @@ -4779,7 +4779,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* For nodes marked for ADISC, Handle unreg in ADISC cmpl * if linkup. In linkdown do unreg_node */ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC) || + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag) || !lpfc_is_link_up(vport->phba)) lpfc_nlp_unreg_node(vport, ndlp); } @@ -4799,9 +4799,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, (!ndlp->rport || ndlp->rport->scsi_target_id == -1 || ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag); lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } } @@ -4833,7 +4831,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int state) { int old_state = ndlp->nlp_state; - int node_dropped = ndlp->nlp_flag & NLP_DROPPED; + bool node_dropped = test_bit(NLP_DROPPED, &ndlp->nlp_flag); char name1[16], name2[16]; unsigned long iflags; @@ -4849,7 +4847,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (node_dropped && old_state == NLP_STE_UNUSED_NODE && state != NLP_STE_UNUSED_NODE) { - ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &ndlp->nlp_flag); lpfc_nlp_get(ndlp); } @@ -4857,7 +4855,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, state != NLP_STE_NPR_NODE) lpfc_cancel_retry_delay_tmo(vport, ndlp); if (old_state == NLP_STE_UNMAPPED_NODE) { - ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; + clear_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag); ndlp->nlp_type &= ~NLP_FC_NODE; } @@ -4954,14 +4952,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * reference from lpfc_nlp_init. If set, don't drop it again and * introduce an imbalance. */ - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DROPPED)) { - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irq(&ndlp->lock); + if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); - return; - } - spin_unlock_irq(&ndlp->lock); } /* @@ -5076,9 +5068,9 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, } else if (pring->ringno == LPFC_FCP_RING) { /* Skip match check if waiting to relogin to FCP target */ if ((ndlp->nlp_type & NLP_FCP_TARGET) && - (ndlp->nlp_flag & NLP_DELAY_TMO)) { + test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) return 0; - } + if (ulp_context == ndlp->nlp_rpi) return 1; } @@ -5148,7 +5140,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) * Everything that matches on txcmplq will be returned * by firmware with a no rpi error. 
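lpfc_drop_node above collapses the lock/test/set/unlock sequence into a single test_and_set_bit(), which returns the previous bit value atomically; only the caller that observes the bit as clear performs the final lpfc_nlp_put(), so the initial reference can never be dropped twice. The same one-shot idiom in isolation:

    #include <linux/bitops.h>
    #include <linux/kref.h>

    enum { DEMO_DROPPED };

    struct demo_node {
            unsigned long flags;
            struct kref kref;
    };

    static void demo_release(struct kref *kref)
    {
            /* final teardown of the node would happen here */
    }

    static void demo_drop_node(struct demo_node *n)
    {
            /* Atomically set the bit and learn whether it was already
             * set: exactly one concurrent caller sees 'false' and drops
             * the reference.
             */
            if (!test_and_set_bit(DEMO_DROPPED, &n->flags))
                    kref_put(&n->kref, demo_release);
    }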
*/ - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { if (phba->sli_rev != LPFC_SLI_REV4) lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions); else @@ -5182,29 +5174,19 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_issue_els_logo(vport, ndlp, 0); /* Check to see if there are any deferred events to process */ - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) && + ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1434 UNREG cmpl deferred logo x%x " "on NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } else { - /* NLP_RELEASE_RPI is only set for SLI4 ports. */ - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - spin_unlock_irq(&ndlp->lock); - } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_UNREG_INP; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); } /* The node has an outstanding reference for the unreg. Now @@ -5224,8 +5206,6 @@ static void lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) { - unsigned long iflags; - /* Driver always gets a reference on the mailbox job * in support of async jobs. */ @@ -5233,9 +5213,8 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, if (!mbox->ctx_ndlp) return; - if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { + if (test_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag)) { mbox->mbox_cmpl = lpfc_nlp_logo_unreg; - } else if (phba->sli_rev == LPFC_SLI_REV4 && !test_bit(FC_UNLOADING, &vport->load_flag) && (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= @@ -5243,13 +5222,6 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, (kref_read(&ndlp->kref) > 0)) { mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; } else { - if (test_bit(FC_UNLOADING, &vport->load_flag)) { - if (phba->sli_rev == LPFC_SLI_REV4) { - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_RELEASE_RPI; - spin_unlock_irqrestore(&ndlp->lock, iflags); - } - } mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } } @@ -5271,13 +5243,13 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) int rc, acc_plogi = 1; uint16_t rpi; - if (ndlp->nlp_flag & NLP_RPI_REGISTERED || - ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) || + test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) { + if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "3366 RPI x%x needs to be " - "unregistered nlp_flag x%x " + "unregistered nlp_flag x%lx " "did x%x\n", ndlp->nlp_rpi, ndlp->nlp_flag, ndlp->nlp_DID); @@ -5285,11 +5257,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* If there is already an UNREG in progress for this ndlp, * no need to queue up another one. 
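A general caution that applies to conversions like the NLP_UNREG_INP clear above (lock, clear flag, unlock becoming a bare clear_bit()): the atomic bitops guarantee atomicity of the single-bit update, but unlike the old spin_unlock they are not ordering barriers. Where release ordering against earlier stores matters, the kernel offers clear_bit_unlock() or an explicit smp_mb__after_atomic(); this is an illustration of the distinction, not a claim that any particular lpfc path needs it:

    #include <linux/bitops.h>

    enum { DEMO_OP_IN_PROGRESS };

    struct demo_node {
            unsigned long flags;
            int result;             /* consumed by whoever sees the flag clear */
    };

    static void demo_finish(struct demo_node *n)
    {
            n->result = 42;         /* store that must be visible first */

            /* Plain clear_bit() is atomic but unordered; the _unlock
             * variant adds release semantics, so the store above is
             * visible before the flag reads as clear.
             */
            clear_bit_unlock(DEMO_OP_IN_PROGRESS, &n->flags);
    }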
*/ - if (ndlp->nlp_flag & NLP_UNREG_INP) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "1436 unreg_rpi SKIP UNREG x%x on " - "NPort x%x deferred x%x flg x%x " + "NPort x%x deferred x%x flg x%lx " "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, @@ -5312,27 +5284,24 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) return 1; } + /* Accept PLOGIs after unreg_rpi_cmpl. */ if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) - /* - * accept PLOGIs after unreg_rpi_cmpl - */ acc_plogi = 0; - if (((ndlp->nlp_DID & Fabric_DID_MASK) != - Fabric_DID_MASK) && - (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))) - ndlp->nlp_flag |= NLP_UNREG_INP; + + if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "1433 unreg_rpi UNREG x%x on " - "NPort x%x deferred flg x%x " + "NPort x%x deferred flg x%lx " "Data:x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp); rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); mempool_free(mbox, phba->mbox_mem_pool); acc_plogi = 1; lpfc_nlp_put(ndlp); @@ -5342,7 +5311,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) LOG_NODE | LOG_DISCOVERY, "1444 Failed to allocate mempool " "unreg_rpi UNREG x%x, " - "DID x%x, flag x%x, " + "DID x%x, flag x%lx, " "ndlp x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp); @@ -5352,7 +5321,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * not unloading. */ if (!test_bit(FC_UNLOADING, &vport->load_flag)) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, @@ -5365,13 +5334,13 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) out: if (phba->sli_rev != LPFC_SLI_REV4) ndlp->nlp_rpi = 0; - ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; - ndlp->nlp_flag &= ~NLP_NPR_ADISC; + clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); if (acc_plogi) - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 1; } - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 0; } @@ -5399,7 +5368,7 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags); list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { /* The mempool_alloc might sleep */ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags); @@ -5487,7 +5456,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* Cleanup node for NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0900 Cleanup node for NPort x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); lpfc_dequeue_node(vport, ndlp); @@ -5532,9 +5501,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_els_abort(phba, ndlp); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd 
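The unreg_rpi path above keeps NLP_UNREG_INP as an operation-in-flight marker, now handled with bitops: if the bit is already set, a second UNREG_RPI mailbox is not queued, and the bit is cleared again when the mailbox completes or fails to issue. A generic sketch of that guard for a one-at-a-time asynchronous operation (the submit helper is hypothetical):

    #include <linux/bitops.h>
    #include <linux/errno.h>

    enum { DEMO_UNREG_INP };

    struct demo_node {
            unsigned long flags;
    };

    /* Hypothetical async submit helper; returns 0 on success. */
    static int demo_submit_unreg(struct demo_node *n) { return 0; }

    static int demo_unreg(struct demo_node *n)
    {
            if (test_bit(DEMO_UNREG_INP, &n->flags))
                    return 0;       /* one already outstanding, skip */

            set_bit(DEMO_UNREG_INP, &n->flags);
            if (demo_submit_unreg(n)) {
                    /* Submission failed: drop the marker so a retry can run. */
                    clear_bit(DEMO_UNREG_INP, &n->flags);
                    return -EIO;
            }
            return 0;               /* completion handler clears the bit */
    }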
= 0; del_timer_sync(&ndlp->nlp_delayfunc); @@ -5543,10 +5510,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_del_init(&ndlp->dev_loss_evt.evt_listp); list_del_init(&ndlp->recovery_evt.evt_listp); lpfc_cleanup_vports_rrqs(vport, ndlp); - - if (phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_flag |= NLP_RELEASE_RPI; - return 0; } @@ -5620,7 +5583,7 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) ); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "0929 FIND node DID " - "Data: x%px x%x x%x x%x x%x x%px\n", + "Data: x%px x%x x%lx x%x x%x x%px\n", ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->nlp_rpi, ndlp->active_rrqs_xri_bitmap); @@ -5667,7 +5630,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport) iflags); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "2025 FIND node DID MAPPED " - "Data: x%px x%x x%x x%x x%px\n", + "Data: x%px x%x x%lx x%x x%px\n", ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->active_rrqs_xri_bitmap); @@ -5701,13 +5664,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6453 Setup New Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); return ndlp; } @@ -5726,7 +5687,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6455 Setup RSCN Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); @@ -5744,13 +5705,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) NLP_EVT_DEVICE_RECOVERY); } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6456 Skip Setup RSCN Node x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); ndlp = NULL; @@ -5758,7 +5717,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6457 Setup Active Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); @@ -5769,7 +5728,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || (!vport->phba->nvmet_support && - ndlp->nlp_flag & NLP_RCV_PLOGI)) + test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag))) return NULL; if (vport->phba->nvmet_support) @@ -5779,10 +5738,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) * allows for rediscovery */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); } return ndlp; } @@ -6153,7 +6109,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) /* Clean up the ndlp on Fabric connections */ lpfc_drop_node(vport, ndlp); - } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { /* Fail outstanding IO now since device * is marked for PLOGI. 
*/ @@ -6366,11 +6322,11 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0004 rpi:%x DID:%x flg:%x %d x%px\n", + "0004 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); @@ -6420,7 +6376,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) if (filter(ndlp, param)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "3185 FIND node filter %ps DID " - "ndlp x%px did x%x flg x%x st x%x " + "ndlp x%px did x%x flg x%lx st x%x " "xri x%x type x%x rpi x%x\n", filter, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -6555,9 +6511,10 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) INIT_LIST_HEAD(&ndlp->nlp_listp); if (vport->phba->sli_rev == LPFC_SLI_REV4) { ndlp->nlp_rpi = rpi; - lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0007 Init New ndlp x%px, rpi:x%x DID:%x " - "flg:x%x refcnt:%d\n", + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_NODE | LOG_DISCOVERY, + "0007 Init New ndlp x%px, rpi:x%x DID:x%x " + "flg:x%lx refcnt:%d\n", ndlp, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); @@ -6589,7 +6546,7 @@ lpfc_nlp_release(struct kref *kref) struct lpfc_vport *vport = ndlp->vport; lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node release: did:x%x flg:x%x type:x%x", + "node release: did:x%x flg:x%lx type:x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, @@ -6601,19 +6558,12 @@ lpfc_nlp_release(struct kref *kref) lpfc_cancel_retry_delay_tmo(vport, ndlp); lpfc_cleanup_node(vport, ndlp); - /* Not all ELS transactions have registered the RPI with the port. - * In these cases the rpi usage is temporary and the node is - * released when the WQE is completed. Catch this case to free the - * RPI to the pool. Because this node is in the release path, a lock - * is unnecessary. All references are gone and the node has been - * dequeued. + /* All nodes are initialized with an RPI that needs to be released + * now. All references are gone and the node has been dequeued. 
*/ - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR && - !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - } + if (vport->phba->sli_rev == LPFC_SLI_REV4) { + lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; } /* The node is not freed back to memory, it is released to a pool so @@ -6642,7 +6592,7 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp) if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node get: did:x%x flg:x%x refcnt:x%x", + "node get: did:x%x flg:x%lx refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); @@ -6674,7 +6624,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp) { if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node put: did:x%x flg:x%x refcnt:x%x", + "node put: did:x%x flg:x%lx refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); } else { @@ -6727,11 +6677,12 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags); goto out; - } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + } else if (test_bit(NLP_RPI_REGISTERED, + &ndlp->nlp_flag)) { ret = 1; lpfc_printf_log(phba, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "2624 RPI %x DID %x flag %x " + "2624 RPI %x DID %x flag %lx " "still logged in\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 0dd451009b07..7f57397d91a9 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1943,6 +1943,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, lpfc_offline_prep(phba, mbx_action); lpfc_sli_flush_io_rings(phba); + lpfc_nvmels_flush_cmd(phba); lpfc_offline(phba); /* release interrupt for possible resource change */ lpfc_sli4_disable_intr(phba); @@ -3092,7 +3093,8 @@ lpfc_cleanup(struct lpfc_vport *vport) lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_DISCOVERY, "0282 did:x%x ndlp:x%px " - "refcnt:%d xflags x%x nflag x%x\n", + "refcnt:%d xflags x%x " + "nflag x%lx\n", ndlp->nlp_DID, (void *)ndlp, kref_read(&ndlp->kref), ndlp->fc4_xpt_flags, @@ -3379,7 +3381,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) } /** - * lpfc_sli4_node_prep - Assign RPIs for active nodes. + * lpfc_sli4_node_rpi_restore - Recover assigned RPIs for active nodes. * @phba: pointer to lpfc hba data structure. * * Allocate RPIs for all active remote nodes. This is needed whenever @@ -3387,7 +3389,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) * is to fixup the temporary rpi assignments. **/ void -lpfc_sli4_node_prep(struct lpfc_hba *phba) +lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba) { struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_vport **vports; @@ -3397,10 +3399,10 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) return; vports = lpfc_create_vport_work_array(phba); - if (vports == NULL) + if (!vports) return; - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i]; i++) { if (test_bit(FC_UNLOADING, &vports[i]->load_flag)) continue; @@ -3409,14 +3411,20 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) nlp_listp) { rpi = lpfc_sli4_alloc_rpi(phba); if (rpi == LPFC_RPI_ALLOC_ERROR) { - /* TODO print log? 
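The lpfc_nlp_release change above is the other half of retiring NLP_RELEASE_RPI: on SLI4 every node now owns an RPI from lpfc_nlp_init() until the last reference goes away, so the release path frees it unconditionally instead of threading a needs-freeing flag through the various unreg paths. A sketch of that simplified lifecycle, with placeholder allocator helpers:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/types.h>

    struct demo_node {
            struct kref kref;
            u16 rpi;
    };

    /* Placeholder pool helpers standing in for the real RPI allocator. */
    static u16 demo_alloc_rpi(void) { return 1; }
    static void demo_free_rpi(u16 rpi) { }

    static void demo_node_init(struct demo_node *n)
    {
            n->rpi = demo_alloc_rpi();      /* every node gets one up front */
            kref_init(&n->kref);
    }

    static void demo_node_release(struct kref *kref)
    {
            struct demo_node *n = container_of(kref, struct demo_node, kref);

            /* Single unconditional free point; no per-path bookkeeping flag. */
            demo_free_rpi(n->rpi);
    }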
*/ + lpfc_printf_vlog(ndlp->vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0099 RPI alloc error for " + "ndlp x%px DID:x%06x " + "flg:x%lx\n", + ndlp, ndlp->nlp_DID, + ndlp->nlp_flag); continue; } ndlp->nlp_rpi = rpi; lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "0009 Assign RPI x%x to ndlp x%px " - "DID:x%06x flg:x%x\n", + "DID:x%06x flg:x%lx\n", ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, ndlp->nlp_flag); } @@ -3820,35 +3828,12 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) &vports[i]->fc_nodes, nlp_listp) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); - + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); if (offline || hba_pci_err) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_UNREG_INP | - NLP_RPI_REGISTERED); - spin_unlock_irq(&ndlp->lock); - if (phba->sli_rev == LPFC_SLI_REV4) - lpfc_sli_rpi_release(vports[i], - ndlp); - } else { - lpfc_unreg_rpi(vports[i], ndlp); - } - /* - * Whenever an SLI4 port goes offline, free the - * RPI. Get a new RPI when the adapter port - * comes back online. - */ - if (phba->sli_rev == LPFC_SLI_REV4) { - lpfc_printf_vlog(vports[i], KERN_INFO, - LOG_NODE | LOG_DISCOVERY, - "0011 Free RPI x%x on " - "ndlp: x%px did x%x\n", - ndlp->nlp_rpi, ndlp, - ndlp->nlp_DID); - lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + clear_bit(NLP_UNREG_INP, + &ndlp->nlp_flag); + clear_bit(NLP_RPI_REGISTERED, + &ndlp->nlp_flag); } if (ndlp->nlp_type & NLP_FABRIC) { @@ -6925,9 +6910,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FDISC; vport->port_state = LPFC_FDISC; } else { @@ -13518,6 +13501,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) /* Disable FW logging to host memory */ lpfc_ras_stop_fwlog(phba); + lpfc_sli4_queue_unset(phba); + /* Reset SLI4 HBA FCoE function */ lpfc_pci_function_reset(phba); diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 4574716c8764..4d88cfe71cae 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -65,7 +65,7 @@ lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_name *nn, struct lpfc_name *pn) { /* First, we MUST have a RPI registered */ - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) return 0; /* Compare the ADISC rsp WWNN / WWPN matches our internal node @@ -239,7 +239,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) /* Abort outstanding I/O on NPort */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, "2819 Abort outstanding I/O on NPort x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); /* Clean up all fabric IOs first.*/ @@ -340,7 +340,7 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox) /* Now process the REG_RPI cmpl */ lpfc_mbx_cmpl_reg_login(phba, login_mbox); - ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; + clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); kfree(save_iocb); } @@ -404,7 +404,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0114 PLOGI chkparm OK Data: x%x x%x x%x " + "0114 PLOGI chkparm OK Data: 
x%x x%x x%lx " "x%x x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi, vport->port_state, @@ -429,7 +429,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* if already logged in, do implicit logout */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) break; fallthrough; case NLP_STE_REG_LOGIN_ISSUE: @@ -449,7 +449,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; - ndlp->nlp_flag &= ~NLP_FIRSTBURST; + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); @@ -480,7 +480,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; - ndlp->nlp_flag &= ~NLP_FIRSTBURST; + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); login_mbox = NULL; link_mbox = NULL; @@ -552,13 +552,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_can_disctmo(vport); } - ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && sp->cmn.valid_vendor_ver_level) { vid = be32_to_cpu(sp->un.vv.vid); flag = be32_to_cpu(sp->un.vv.flags); if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) - ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + set_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); } login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -627,10 +627,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * this ELS request. The only way to do this is * to register, then unregister the RPI. 
*/ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN | - NLP_RCV_PLOGI); - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag); + set_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + set_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); } stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; @@ -665,9 +664,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, login_mbox->ctx_u.save_iocb = save_iocb; /* For PLOGI ACC */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + set_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); /* Start the ball rolling by issuing REG_LOGIN here */ rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT); @@ -797,7 +795,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, */ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && - !(ndlp->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); } @@ -814,9 +812,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -835,9 +831,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary * PLOGIs during LOGO storms from a device. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); if (els_cmd == ELS_CMD_PRLO) lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); else @@ -890,9 +884,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FDISC; vport->port_state = LPFC_FDISC; } else { @@ -915,14 +907,12 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) { mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY, "3204 Start nlpdelay on DID x%06x " - "nflag x%x lastels x%x ref cnt %u", + "nflag x%lx lastels x%x ref cnt %u", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_last_elscmd, kref_read(&ndlp->kref)); @@ -935,9 +925,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); /* The driver has to wait until the ACC completes before it continues * processing the LOGO. The action will resume in * lpfc_cmpl_els_logo_acc routine. 
Since part of processing includes an @@ -978,7 +966,7 @@ lpfc_rcv_prli_support_check(struct lpfc_vport *vport, out: lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, "6115 Rcv PRLI (%x) check failed: ndlp rpi %d " - "state x%x flags x%x port_type: x%x " + "state x%x flags x%lx port_type: x%x " "npr->initfcn: x%x npr->tgtfcn: x%x\n", cmd, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag, vport->port_type, @@ -1020,7 +1008,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (npr->prliType == PRLI_NVME_TYPE) ndlp->nlp_type |= NLP_NVME_TARGET; if (npr->writeXferRdyDis) - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); } if (npr->Retry && ndlp->nlp_type & (NLP_FCP_INITIATOR | NLP_FCP_TARGET)) @@ -1057,7 +1045,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, roles |= FC_RPORT_ROLE_FCP_TARGET; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport rolechg: role:x%x did:x%x flg:x%x", + "rport rolechg: role:x%x did:x%x flg:x%lx", roles, ndlp->nlp_DID, ndlp->nlp_flag); if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) @@ -1068,10 +1056,8 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, static uint32_t lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); return 0; } @@ -1081,16 +1067,12 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) (test_bit(FC_RSCN_MODE, &vport->fc_flag) || ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) && (ndlp->nlp_type & NLP_FCP_TARGET)))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); return 1; } } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); return 0; } @@ -1115,10 +1097,10 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, /* If there is already an UNREG in progress for this ndlp, * no need to queue up another one. 
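One subtle consequence of the bitop conversion, visible in the PLOGI-collision hunks above: what used to be a single locked update of several flags, such as |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN | NLP_RCV_PLOGI), is now a sequence of separate set_bit() calls. Each call is atomic, but the group is no longer one indivisible transition, which is only safe because readers test these bits individually. The shape in isolation:

    #include <linux/bitops.h>

    enum { DEMO_RM_DFLT_RPI, DEMO_ACC_REGLOGIN, DEMO_RCV_PLOGI };

    static void demo_mark_plogi_collision(unsigned long *flags)
    {
            /* Three independent atomic updates; a concurrent reader may
             * observe any intermediate combination, so consumers must
             * not assume the bits change together.
             */
            set_bit(DEMO_RM_DFLT_RPI, flags);
            set_bit(DEMO_ACC_REGLOGIN, flags);
            set_bit(DEMO_RCV_PLOGI, flags);
    }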
*/ - if (ndlp->nlp_flag & NLP_UNREG_INP) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1435 release_rpi SKIP UNREG x%x on " - "NPort x%x deferred x%x flg x%x " + "NPort x%x deferred x%x flg x%lx " "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, @@ -1143,11 +1125,11 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))) - ndlp->nlp_flag |= NLP_UNREG_INP; + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1437 release_rpi UNREG x%x " - "on NPort x%x flg x%x\n", + "on NPort x%x flg x%lx\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); @@ -1175,7 +1157,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0271 Illegal State Transition: node x%x " - "event x%x, state x%x Data: x%x x%x\n", + "event x%x, state x%x Data: x%x x%lx\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); return ndlp->nlp_state; @@ -1190,13 +1172,12 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * working on the same NPortID, do nothing for this thread * to stop it. */ - if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { + if (!test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0272 Illegal State Transition: node x%x " - "event x%x, state x%x Data: x%x x%x\n", + "event x%x, state x%x Data: x%x x%lx\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); - } return ndlp->nlp_state; } @@ -1230,9 +1211,7 @@ lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return ndlp->nlp_state; @@ -1290,11 +1269,9 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, NULL); } else { if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) && - (vport->num_disc_nodes)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && + vport->num_disc_nodes) { + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* Check if there are more PLOGIs to be sent */ lpfc_more_plogi(vport); if (vport->num_disc_nodes == 0) { @@ -1356,9 +1333,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -1389,7 +1364,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { + if (test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) { /* Recovery from PLOGI collision logic */ return ndlp->nlp_state; } @@ -1418,7 +1393,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, 
goto out; /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", + "0121 PLOGI chkparm OK Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid)) @@ -1446,14 +1421,14 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ed_tov = (phba->fc_edtov + 999999) / 1000000; } - ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && sp->cmn.valid_vendor_ver_level) { vid = be32_to_cpu(sp->un.vv.vid); flag = be32_to_cpu(sp->un.vv.flags); if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) - ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + set_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); } /* @@ -1476,7 +1451,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, LOG_TRACE_EVENT, "0133 PLOGI: no memory " "for config_link " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); goto out; @@ -1500,7 +1475,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0018 PLOGI: no memory for reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); goto out; @@ -1520,7 +1495,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login; break; default: - ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; } @@ -1535,8 +1510,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, NLP_STE_REG_LOGIN_ISSUE); return ndlp->nlp_state; } - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); /* decrement node reference count to the failed mbox * command */ @@ -1544,7 +1518,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0134 PLOGI: cannot issue reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); } else { @@ -1552,7 +1526,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0135 PLOGI: cannot format reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); } @@ -1605,18 +1579,15 @@ static uint32_t lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding PLOGI */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -1636,9 +1607,8 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - 
spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); return ndlp->nlp_state; } @@ -1656,10 +1626,7 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, cmdiocb = (struct lpfc_iocbq *) arg; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + if (test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { if (vport->num_disc_nodes) lpfc_more_adisc(vport); } @@ -1748,9 +1715,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; @@ -1789,18 +1754,15 @@ static uint32_t lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding ADISC */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding ADISC */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -1820,9 +1782,8 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -1856,7 +1817,7 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, * transition to UNMAPPED provided the RPI has completed * registration. 
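lpfc_rcv_plogi_adisc_issue above folds the old is-it-set check and the subsequent clear into one test_and_clear_bit(), so the pending-discovery marker is consumed exactly once even if two paths race to clear it. The idiom in isolation:

    #include <linux/bitops.h>

    enum { DEMO_NPR_2B_DISC };

    static void demo_more_disc(void)
    {
            /* kick off the next discovery step */
    }

    static void demo_plogi_during_adisc(unsigned long *flags, int num_disc_nodes)
    {
            /* Returns the old value and clears the bit in one atomic
             * step, so only one caller consumes the marker.
             */
            if (test_and_clear_bit(DEMO_NPR_2B_DISC, flags)) {
                    if (num_disc_nodes)
                            demo_more_disc();
            }
    }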
*/ - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { lpfc_rcv_prli(vport, ndlp, cmdiocb); lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); } else { @@ -1895,7 +1856,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, if ((mb = phba->sli.mbox_active)) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == mb->ctx_ndlp)) { - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); mb->ctx_ndlp = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -1906,7 +1867,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == mb->ctx_ndlp)) { - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); list_del(&mb->list); phba->sli.mboxq_cnt--; @@ -1976,9 +1937,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_issue_els_logo(vport, ndlp, 0); @@ -1989,7 +1948,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); /* Only if we are not a fabric nport do we issue PRLI */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, @@ -2061,15 +2020,12 @@ lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -2084,17 +2040,16 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); /* If we are a target we won't immediately transition into PRLI, * so if REG_LOGIN already completed we don't need to ignore it. */ - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) || + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) || !vport->phba->nvmet_support) - ndlp->nlp_flag |= NLP_IGNR_REG_CMPL; + set_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2228,7 +2183,8 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (npr->targetFunc) { ndlp->nlp_type |= NLP_FCP_TARGET; if (npr->writeXferRdyDis) - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, + &ndlp->nlp_flag); } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; @@ -2272,7 +2228,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Both sides support FB. The target's first * burst size is a 512 byte encoded value. 
*/ - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz, nvpr); @@ -2287,7 +2243,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6029 NVME PRLI Cmpl w1 x%08x " - "w4 x%08x w5 x%08x flag x%x, " + "w4 x%08x w5 x%08x flag x%lx, " "fcp_info x%x nlp_type x%x\n", be32_to_cpu(nvpr->word1), be32_to_cpu(nvpr->word4), @@ -2299,9 +2255,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, (vport->port_type == LPFC_NPIV_PORT) && vport->cfg_restrict_login) { out: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_TARGET_REMOVE; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag); lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; @@ -2353,18 +2307,15 @@ static uint32_t lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding PLOGI */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } @@ -2401,9 +2352,8 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2442,9 +2392,7 @@ lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } @@ -2483,9 +2431,8 @@ lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2591,8 +2538,9 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport, { ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); lpfc_disc_set_adisc(vport, ndlp); @@ -2653,9 +2601,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); /* Send PRLO_ACC */ - 
spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); /* Save ELS_CMD_PRLO as the last elscmd and then set to NPR. @@ -2665,7 +2611,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY, - "3422 DID x%06x nflag x%x lastels x%x ref cnt %u\n", + "3422 DID x%06x nflag x%lx lastels x%x ref cnt %u\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_last_elscmd, kref_read(&ndlp->kref)); @@ -2685,8 +2631,9 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; @@ -2699,16 +2646,16 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* Ignore PLOGI if we have an outstanding LOGO */ - if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) + if (test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) || + test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) return ndlp->nlp_state; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { lpfc_cancel_retry_delay_tmo(vport, ndlp); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); - } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + } else if (!test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* send PLOGI immediately, move to PLOGI issue state */ - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2729,14 +2676,14 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { /* * ADISC nodes will be handled in regular discovery path after * receiving response from NS. * * For other nodes, Send PLOGI to trigger an implicit LOGO. */ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2767,15 +2714,15 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * or discovery in progress for this node. Starting discovery * here will affect the counting of discovery threads. */ - if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && - !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag) && + !test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* * ADISC nodes will be handled in regular discovery path after * receiving response from NS. * * For other nodes, Send PLOGI to trigger an implicit LOGO. 
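The recurring change in these lpfc hunks is that ndlp->nlp_flag moves from a u32 of mask bits updated under ndlp->lock to an unsigned long updated with the kernel's atomic bitops, so the NLP_* constants become bit numbers rather than masks and the matching log formats switch from %x to %lx. The following is only an illustrative sketch of that conversion pattern, with made-up names (struct ex_node, EX_DELAY_TMO) standing in for the real lpfc definitions:

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/printk.h>

/* Hypothetical stand-in for the node structure; not the lpfc layout. */
struct ex_node {
	spinlock_t lock;	/* formerly guarded the mask-based flag word */
	unsigned long flags;	/* previously a u32 of (1 << n) masks */
};

#define EX_DELAY_TMO	0	/* now a bit number, not a mask */

static void ex_arm_delay(struct ex_node *n)
{
	/*
	 * Old pattern:
	 *   spin_lock_irq(&n->lock);
	 *   n->flags |= EX_DELAY_TMO_MASK;
	 *   spin_unlock_irq(&n->lock);
	 * New pattern: one atomic read-modify-write, no lock needed.
	 */
	set_bit(EX_DELAY_TMO, &n->flags);
}

static bool ex_delay_armed(struct ex_node *n)
{
	return test_bit(EX_DELAY_TMO, &n->flags);
}

static void ex_log(struct ex_node *n)
{
	/* an unsigned long flag word wants %lx, hence the format edits */
	pr_info("flags x%lx\n", n->flags);
}

Clearing a flag follows the same shape with clear_bit(); the point of the conversion is that each flag update is independently atomic, so the per-node spinlock only has to cover the fields that still change together.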
*/ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2790,24 +2737,18 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); - if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; } else { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); } return ndlp->nlp_state; } @@ -2844,7 +2785,7 @@ lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + if (ulp_status && test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2877,7 +2818,7 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + if (ulp_status && test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2896,12 +2837,11 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, /* SLI4 ports have preallocated logical rpis. 
*/ if (vport->phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; - if (ndlp->nlp_flag & NLP_LOGO_ACC) { + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + if (test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) lpfc_unreg_rpi(vport, ndlp); - } } else { - if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { + if (test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2913,10 +2853,8 @@ static uint32_t lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; } lpfc_drop_node(vport, ndlp); @@ -2932,8 +2870,9 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return ndlp->nlp_state; lpfc_cancel_retry_delay_tmo(vport, ndlp); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; @@ -3146,7 +3085,7 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* DSM in event on NPort in state */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0211 DSM in event x%x on NPort x%x in " - "state %d rpi x%x Data: x%x x%x\n", + "state %d rpi x%x Data: x%lx x%x\n", evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi, ndlp->nlp_flag, data1); @@ -3163,12 +3102,12 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ((uint32_t)ndlp->nlp_type)); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0212 DSM out state %d on NPort x%x " - "rpi x%x Data: x%x x%x\n", + "rpi x%x Data: x%lx x%x\n", rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, data1); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, - "DSM out: ste:%d did:x%x flg:x%x", + "DSM out: ste:%d did:x%x flg:x%lx", rc, ndlp->nlp_DID, ndlp->nlp_flag); /* Decrement the ndlp reference count held for this function */ lpfc_nlp_put(ndlp); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index d70da2736c94..6e0b27460c24 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1232,7 +1232,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, /* Word 5 */ if ((phba->cfg_nvme_enable_fb) && - (pnode->nlp_flag & NLP_FIRSTBURST)) { + test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) { req_len = lpfc_ncmd->nvmeCmd->payload_length; if (req_len < pnode->nvme_fb_size) wqe->fcp_iwrite.initial_xfer_len = @@ -2231,6 +2231,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, struct lpfc_hba *phba = vport->phba; struct lpfc_sli4_hdw_queue *qp; int abts_scsi, abts_nvme; + u16 nvmels_cnt; /* Host transport has to clean up and confirm requiring an indefinite * wait. 
Print a message if a 10 second wait expires and renew the @@ -2243,6 +2244,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, pending = 0; abts_scsi = 0; abts_nvme = 0; + nvmels_cnt = 0; for (i = 0; i < phba->cfg_hdw_queue; i++) { qp = &phba->sli4_hba.hdwq[i]; if (!vport->localport || !qp || !qp->io_wq) @@ -2255,6 +2257,11 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, abts_scsi += qp->abts_scsi_io_bufs; abts_nvme += qp->abts_nvme_io_bufs; } + if (phba->sli4_hba.nvmels_wq) { + pring = phba->sli4_hba.nvmels_wq->pring; + if (pring) + nvmels_cnt = pring->txcmplq_cnt; + } if (!vport->localport || test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) || phba->link_state == LPFC_HBA_ERROR || @@ -2263,10 +2270,10 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6176 Lport x%px Localport x%px wait " - "timed out. Pending %d [%d:%d]. " + "timed out. Pending %d [%d:%d:%d]. " "Renewing.\n", lport, vport->localport, pending, - abts_scsi, abts_nvme); + abts_scsi, abts_nvme, nvmels_cnt); continue; } break; @@ -2644,14 +2651,11 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * reference. Check if another thread has set * NLP_DROPPED. */ - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DROPPED)) { - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irq(&ndlp->lock); + if (!test_and_set_bit(NLP_DROPPED, + &ndlp->nlp_flag)) { lpfc_nlp_put(ndlp); return; } - spin_unlock_irq(&ndlp->lock); } } } @@ -2841,3 +2845,43 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn); #endif } + +/** + * lpfc_nvmels_flush_cmd - Clean up outstanding nvmels commands for a port + * @phba: Pointer to HBA context object. + * + **/ +void +lpfc_nvmels_flush_cmd(struct lpfc_hba *phba) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + LIST_HEAD(cancel_list); + struct lpfc_sli_ring *pring = NULL; + struct lpfc_iocbq *piocb, *tmp_iocb; + unsigned long iflags; + + if (phba->sli4_hba.nvmels_wq) + pring = phba->sli4_hba.nvmels_wq->pring; + + if (unlikely(!pring)) + return; + + spin_lock_irqsave(&phba->hbalock, iflags); + spin_lock(&pring->ring_lock); + list_splice_init(&pring->txq, &cancel_list); + pring->txq_cnt = 0; + list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { + if (piocb->cmd_flag & LPFC_IO_NVME_LS) { + list_move_tail(&piocb->list, &cancel_list); + pring->txcmplq_cnt--; + piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; + } + } + spin_unlock(&pring->ring_lock); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + if (!list_empty(&cancel_list)) + lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); +#endif +} diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 0cef5d089f34..3b29d57d77ac 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -2854,7 +2854,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */ if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { - if (ndlp->nlp_flag & NLP_SUPPRESS_RSP) + if (test_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag)) bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1); } else { diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 0eaede8275da..908b2f7dd70c 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -4629,7 +4629,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; iocb_cmd->ulpPU = 
PARM_READ_CHECK; if (vport->cfg_first_burst_size && - (pnode->nlp_flag & NLP_FIRSTBURST)) { + test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) { u32 xrdy_len; fcpdl = scsi_bufflen(scsi_cmnd); @@ -5829,7 +5829,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport, lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0702 Issue %s to TGT %d LUN %llu " - "rpi x%x nlp_flag x%x Data: x%x x%x\n", + "rpi x%x nlp_flag x%lx Data: x%x x%x\n", lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, iocbq->cmd_flag); @@ -6094,8 +6094,8 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0722 Target Reset rport failure: rdata x%px\n", rdata); if (pnode) { + clear_bit(NLP_NPR_ADISC, &pnode->nlp_flag); spin_lock_irqsave(&pnode->lock, flags); - pnode->nlp_flag &= ~NLP_NPR_ADISC; pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; spin_unlock_irqrestore(&pnode->lock, flags); } @@ -6124,7 +6124,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) !pnode->logo_waitq) { pnode->logo_waitq = &waitq; pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - pnode->nlp_flag |= NLP_ISSUE_LOGO; + set_bit(NLP_ISSUE_LOGO, &pnode->nlp_flag); pnode->save_flags |= NLP_WAIT_FOR_LOGO; spin_unlock_irqrestore(&pnode->lock, flags); lpfc_unreg_rpi(vport, pnode); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 2ec6e55771b4..874644b31a3e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1932,7 +1932,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) union lpfc_wqe128 *wqe; struct lpfc_iocbq *sync_buf; unsigned long iflags; - u32 ret_val; + u32 ret_val, cgn_sig_freq; u32 atot, wtot, max; u8 warn_sync_period = 0; @@ -1987,8 +1987,10 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) } else if (wtot) { if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { + cgn_sig_freq = phba->cgn_sig_freq ? phba->cgn_sig_freq : + lpfc_fabric_cgn_frequency; /* We hit an Signal warning condition */ - max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency * + max = LPFC_SEC_TO_MSEC / cgn_sig_freq * lpfc_acqe_cgn_frequency; bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max); bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot); @@ -2842,27 +2844,6 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) return; } -static void -__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) -{ - unsigned long iflags; - - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - spin_unlock_irqrestore(&ndlp->lock, iflags); - } - ndlp->nlp_flag &= ~NLP_UNREG_INP; -} - -void -lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) -{ - __lpfc_sli_rpi_release(vport, ndlp); -} - /** * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler * @phba: Pointer to HBA context object. 
@@ -2932,18 +2913,16 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport, KERN_INFO, LOG_MBOX | LOG_DISCOVERY, "1438 UNREG cmpl deferred mbox x%x " - "on NPort x%x Data: x%x x%x x%px x%lx x%x\n", + "on NPort x%x Data: x%lx x%x x%px x%lx x%x\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp, vport->load_flag, kref_read(&ndlp->kref)); - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) && + ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) { + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); - } else { - __lpfc_sli_rpi_release(vport, ndlp); } /* The unreg_login mailbox is complete and had a @@ -2991,6 +2970,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct lpfc_nodelist *ndlp; + bool unreg_inp; ndlp = pmb->ctx_ndlp; if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { @@ -3003,20 +2983,26 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport, KERN_INFO, LOG_MBOX | LOG_SLI | LOG_NODE, "0010 UNREG_LOGIN vpi:x%x " - "rpi:%x DID:%x defer x%x flg x%x " + "rpi:%x DID:%x defer x%x flg x%lx " "x%px\n", vport->vpi, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp->nlp_flag, ndlp); - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + + /* Cleanup the nlp_flag now that the UNREG RPI + * has completed. + */ + unreg_inp = test_and_clear_bit(NLP_UNREG_INP, + &ndlp->nlp_flag); + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); /* Check to see if there are any deferred * events to process */ - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != - NLP_EVT_NOTHING_PENDING)) { + if (unreg_inp && + ndlp->nlp_defer_did != + NLP_EVT_NOTHING_PENDING) { lpfc_printf_vlog( vport, KERN_INFO, LOG_MBOX | LOG_SLI | LOG_NODE, @@ -3025,14 +3011,12 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) "NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); - ndlp->nlp_flag &= ~NLP_UNREG_INP; ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi( vport, ndlp->nlp_DID, 0); - } else { - __lpfc_sli_rpi_release(vport, ndlp); } + lpfc_nlp_put(ndlp); } } @@ -5291,6 +5275,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) "0296 Restart HBA Data: x%x x%x\n", phba->pport->port_state, psli->sli_flag); + lpfc_sli4_queue_unset(phba); + rc = lpfc_sli4_brdreset(phba); if (rc) { phba->link_state = LPFC_HBA_ERROR; @@ -8748,6 +8734,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) lpfc_sli_config_mbox_opcode_get( phba, mboxq), rc, dd); + /* * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent * calls depends on these resources to complete port setup. @@ -8760,6 +8747,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) goto out_free_mbox; } + lpfc_sli4_node_rpi_restore(phba); + lpfc_set_host_data(phba, mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); @@ -8947,7 +8936,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = -ENODEV; goto out_free_iocblist; } - lpfc_sli4_node_prep(phba); if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { @@ -14352,9 +14340,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) * an unsolicited PLOGI from the same NPortId from * starting another mailbox transaction. 
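Where the old code had to hold the node lock to test a flag and then modify it as a single step (the NLP_UNREG_INP and NLP_DROPPED hunks), the conversion uses test_and_clear_bit()/test_and_set_bit(), which atomically return the previous bit value. A rough illustration with a hypothetical refcounted object rather than the lpfc types:

#include <linux/bitops.h>
#include <linux/kref.h>

struct ex_obj {
	unsigned long flags;
	struct kref kref;
};

#define EX_DROPPED	0	/* bit number */

static void ex_release(struct kref *kref)
{
	/* real code would container_of() back to ex_obj and free it */
}

static void ex_drop_once(struct ex_obj *o)
{
	/*
	 * Old pattern: lock, test the DROPPED mask, set it, unlock, put.
	 * test_and_set_bit() returns the old value, so only the first
	 * caller to flip the flag drops the reference.
	 */
	if (!test_and_set_bit(EX_DROPPED, &o->flags))
		kref_put(&o->kref, ex_release);
}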
*/ - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_UNREG_INP; - spin_unlock_irqrestore(&ndlp->lock, iflags); + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_unreg_login(phba, vport->vpi, pmbox->un.varWords[0], pmb); pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; @@ -17625,6 +17611,9 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) if (!eq) return -ENODEV; + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17651,10 +17640,12 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, eq->phba->mbox_mem_pool); +list_remove: /* Remove eq from any list */ list_del_init(&eq->list); - mempool_free(mbox, eq->phba->mbox_mem_pool); + return status; } @@ -17682,6 +17673,10 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) /* sanity check on queue memory */ if (!cq) return -ENODEV; + + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17707,9 +17702,11 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, cq->phba->mbox_mem_pool); + +list_remove: /* Remove cq from any list */ list_del_init(&cq->list); - mempool_free(mbox, cq->phba->mbox_mem_pool); return status; } @@ -17737,6 +17734,10 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) /* sanity check on queue memory */ if (!mq) return -ENODEV; + + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17762,9 +17763,11 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, mq->phba->mbox_mem_pool); + +list_remove: /* Remove mq from any list */ list_del_init(&mq->list); - mempool_free(mbox, mq->phba->mbox_mem_pool); return status; } @@ -17792,6 +17795,10 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) /* sanity check on queue memory */ if (!wq) return -ENODEV; + + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17816,11 +17823,13 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, wq->phba->mbox_mem_pool); + +list_remove: /* Remove wq from any list */ list_del_init(&wq->list); kfree(wq->pring); wq->pring = NULL; - mempool_free(mbox, wq->phba->mbox_mem_pool); return status; } @@ -17850,6 +17859,10 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, /* sanity check on queue memory */ if (!hrq || !drq) return -ENODEV; + + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) + goto list_remove; + mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -17890,9 +17903,11 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, shdr_status, shdr_add_status, rc); status = -ENXIO; } + mempool_free(mbox, hrq->phba->mbox_mem_pool); + +list_remove: list_del_init(&hrq->list); list_del_init(&drq->list); - mempool_free(mbox, hrq->phba->mbox_mem_pool); return status; } @@ -19074,9 +19089,9 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, * to free ndlp when transmit completes */ if 
(ndlp->nlp_state == NLP_STE_UNUSED_NODE && - !(ndlp->nlp_flag & NLP_DROPPED) && + !test_bit(NLP_DROPPED, &ndlp->nlp_flag) && !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) { - ndlp->nlp_flag |= NLP_DROPPED; + set_bit(NLP_DROPPED, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } } @@ -21094,11 +21109,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) /* Unregister the RPI when mailbox complete */ mb->mbox_flag |= LPFC_MBX_IMED_UNREG; restart_loop = 1; - spin_unlock_irq(&phba->hbalock); - spin_lock(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&ndlp->lock); - spin_lock_irq(&phba->hbalock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); break; } } @@ -21113,9 +21124,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) ndlp = mb->ctx_ndlp; mb->ctx_ndlp = NULL; if (ndlp) { - spin_lock(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } } @@ -21124,9 +21133,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) /* Release the ndlp with the cleaned-up active mailbox command */ if (act_mbx_ndlp) { - spin_lock(&act_mbx_ndlp->lock); - act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&act_mbx_ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &act_mbx_ndlp->nlp_flag); lpfc_nlp_put(act_mbx_ndlp); } } diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index e70f163fab90..61fe1220f8ad 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.4.0.5" +#define LPFC_DRIVER_VERSION "14.4.0.6" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 7a4d4d8e2ad5..9e0e35763377 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -496,7 +496,7 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) !ndlp->logo_waitq) { ndlp->logo_waitq = &waitq; ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag |= NLP_ISSUE_LOGO; + set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); ndlp->save_flags |= NLP_WAIT_FOR_LOGO; } spin_unlock_irq(&ndlp->lock); @@ -515,8 +515,8 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } /* Error - clean up node flags. */ + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; spin_unlock_irq(&ndlp->lock); @@ -708,7 +708,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS, "1829 DA_ID issue status %d. 
" - "SFlag x%x NState x%x, NFlag x%x " + "SFlag x%x NState x%x, NFlag x%lx " "Rpi x%x\n", rc, ndlp->save_flags, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c index 187ae0a65d40..ff0253d47a0e 100644 --- a/drivers/scsi/mac_esp.c +++ b/drivers/scsi/mac_esp.c @@ -432,7 +432,7 @@ static void esp_mac_remove(struct platform_device *dev) static struct platform_driver esp_mac_driver = { .probe = esp_mac_probe, - .remove_new = esp_mac_remove, + .remove = esp_mac_remove, .driver = { .name = DRV_MODULE_NAME, }, diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index f225bb20aa22..a86bd839d08e 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -547,7 +547,7 @@ static void __exit mac_scsi_remove(struct platform_device *pdev) * triggering a section mismatch warning. */ static struct platform_driver mac_scsi_driver __refdata = { - .remove_new = __exit_p(mac_scsi_remove), + .remove = __exit_p(mac_scsi_remove), .driver = { .name = DRV_MODULE_NAME, }, diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index fcb0fa31536b..16e0baeb8799 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -542,8 +542,8 @@ struct mpi3mr_hba_port { * @port_list: List of ports belonging to a SAS node * @num_phys: Number of phys associated with port * @marked_responding: used while refresing the sas ports - * @lowest_phy: lowest phy ID of current sas port - * @phy_mask: phy_mask of current sas port + * @lowest_phy: lowest phy ID of current sas port, valid for controller port + * @phy_mask: phy_mask of current sas port, valid for controller port * @hba_port: HBA port entry * @remote_identify: Attached device identification * @rphy: SAS transport layer rphy object diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c index ccd23def2e0c..0ba9e6a6a13c 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_transport.c +++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c @@ -590,12 +590,13 @@ static enum sas_linkrate mpi3mr_convert_phy_link_rate(u8 link_rate) * @mrioc: Adapter instance reference * @mr_sas_port: Internal Port object * @mr_sas_phy: Internal Phy object + * @host_node: Flag to indicate this is a host_node * * Return: None. */ static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc, struct mpi3mr_sas_port *mr_sas_port, - struct mpi3mr_sas_phy *mr_sas_phy) + struct mpi3mr_sas_phy *mr_sas_phy, u8 host_node) { u64 sas_address = mr_sas_port->remote_identify.sas_address; @@ -605,9 +606,13 @@ static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc, list_del(&mr_sas_phy->port_siblings); mr_sas_port->num_phys--; - mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id); - if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id) - mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + + if (host_node) { + mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id); + + if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id) + mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + } sas_port_delete_phy(mr_sas_port->port, mr_sas_phy->phy); mr_sas_phy->phy_belongs_to_port = 0; } @@ -617,12 +622,13 @@ static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc, * @mrioc: Adapter instance reference * @mr_sas_port: Internal Port object * @mr_sas_phy: Internal Phy object + * @host_node: Flag to indicate this is a host_node * * Return: None. 
*/ static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc, struct mpi3mr_sas_port *mr_sas_port, - struct mpi3mr_sas_phy *mr_sas_phy) + struct mpi3mr_sas_phy *mr_sas_phy, u8 host_node) { u64 sas_address = mr_sas_port->remote_identify.sas_address; @@ -632,9 +638,12 @@ static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc, list_add_tail(&mr_sas_phy->port_siblings, &mr_sas_port->phy_list); mr_sas_port->num_phys++; - mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id); - if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy) - mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + if (host_node) { + mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id); + + if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy) + mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + } sas_port_add_phy(mr_sas_port->port, mr_sas_phy->phy); mr_sas_phy->phy_belongs_to_port = 1; } @@ -675,7 +684,7 @@ static void mpi3mr_add_phy_to_an_existing_port(struct mpi3mr_ioc *mrioc, if (srch_phy == mr_sas_phy) return; } - mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy); + mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy, mr_sas_node->host_node); return; } } @@ -736,7 +745,7 @@ static void mpi3mr_del_phy_from_an_existing_port(struct mpi3mr_ioc *mrioc, mpi3mr_delete_sas_port(mrioc, mr_sas_port); else mpi3mr_delete_sas_phy(mrioc, mr_sas_port, - mr_sas_phy); + mr_sas_phy, mr_sas_node->host_node); return; } } @@ -1028,7 +1037,7 @@ mpi3mr_alloc_hba_port(struct mpi3mr_ioc *mrioc, u16 port_id) /** * mpi3mr_get_hba_port_by_id - find hba port by id * @mrioc: Adapter instance reference - * @port_id - Port ID to search + * @port_id: Port ID to search * * Return: mpi3mr_hba_port reference for the matched port */ @@ -1367,7 +1376,8 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc, mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node, mr_sas_port->remote_identify.sas_address, hba_port); - if (mr_sas_node->num_phys >= sizeof(mr_sas_port->phy_mask) * 8) + if (mr_sas_node->host_node && mr_sas_node->num_phys >= + sizeof(mr_sas_port->phy_mask) * 8) ioc_info(mrioc, "max port count %u could be too high\n", mr_sas_node->num_phys); @@ -1377,7 +1387,7 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc, (mr_sas_node->phy[i].hba_port != hba_port)) continue; - if (i >= sizeof(mr_sas_port->phy_mask) * 8) { + if (mr_sas_node->host_node && (i >= sizeof(mr_sas_port->phy_mask) * 8)) { ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n", i, sizeof(mr_sas_port->phy_mask) * 8); goto out_fail; @@ -1385,7 +1395,8 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc, list_add_tail(&mr_sas_node->phy[i].port_siblings, &mr_sas_port->phy_list); mr_sas_port->num_phys++; - mr_sas_port->phy_mask |= (1 << i); + if (mr_sas_node->host_node) + mr_sas_port->phy_mask |= (1 << i); } if (!mr_sas_port->num_phys) { @@ -1394,7 +1405,8 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc, goto out_fail; } - mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + if (mr_sas_node->host_node) + mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) { tgtdev = mpi3mr_get_tgtdev_by_addr(mrioc, diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c index e08a38e2a442..9b19d5205e50 100644 --- a/drivers/scsi/mvme16x_scsi.c +++ b/drivers/scsi/mvme16x_scsi.c @@ -127,7 +127,7 @@ static struct platform_driver mvme16x_scsi_driver = { .name = "mvme16x-scsi", }, .probe = mvme16x_probe, - 
.remove_new = mvme16x_device_remove, + .remove = mvme16x_device_remove, }; static int __init mvme16x_scsi_init(void) diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h index 501b574239e8..7871e29a820a 100644 --- a/drivers/scsi/pm8001/pm8001_defs.h +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -92,8 +92,11 @@ enum port_type { #define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */ #define PM8001_RESERVE_SLOT 8 -#define CONFIG_SCSI_PM8001_MAX_DMA_SG 528 -#define PM8001_MAX_DMA_SG CONFIG_SCSI_PM8001_MAX_DMA_SG +#define PM8001_SECTOR_SIZE 512 +#define PM8001_PAGE_SIZE_4K 4096 +#define PM8001_MAX_IO_SIZE (4 * 1024 * 1024) +#define PM8001_MAX_DMA_SG (PM8001_MAX_IO_SIZE / PM8001_PAGE_SIZE_4K) +#define PM8001_MAX_SECTORS (PM8001_MAX_IO_SIZE / PM8001_SECTOR_SIZE) enum memory_region_num { AAP1 = 0x0, /* application acceleration processor */ diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 33e1eba62ca1..f8c81e53e93f 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -68,6 +68,10 @@ static bool pm8001_read_wwn = true; module_param_named(read_wwn, pm8001_read_wwn, bool, 0444); MODULE_PARM_DESC(zoned, "Get WWN from the controller. Default: true"); +uint pcs_event_log_severity = 0x03; +module_param(pcs_event_log_severity, int, 0644); +MODULE_PARM_DESC(pcs_event_log_severity, "PCS event log severity level"); + static struct scsi_transport_template *pm8001_stt; static int pm8001_init_ccb_tag(struct pm8001_hba_info *); @@ -117,6 +121,7 @@ static const struct scsi_host_template pm8001_sht = { .scan_start = pm8001_scan_start, .can_queue = 1, .sg_tablesize = PM8001_MAX_DMA_SG, + .max_sectors = PM8001_MAX_SECTORS, .shost_groups = pm8001_host_groups, .sdev_groups = pm8001_sdev_groups, .track_queue_depth = 1, @@ -447,9 +452,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, } for (i = 0; i < PM8001_MAX_DEVICES; i++) { pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED; - pm8001_ha->devices[i].id = i; - pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; - atomic_set(&pm8001_ha->devices[i].running_req, 0); } pm8001_ha->flags = PM8001F_INIT_TIME; return 0; diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index ee2da8e49d4c..d80cffd25a6e 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -572,6 +572,13 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, pm8001_ccb_free(pm8001_ha, ccb); } +static void pm8001_init_dev(struct pm8001_device *pm8001_dev, int id) +{ + pm8001_dev->id = id; + pm8001_dev->device_id = PM8001_MAX_DEVICES; + atomic_set(&pm8001_dev->running_req, 0); +} + /** * pm8001_alloc_dev - find a empty pm8001_device * @pm8001_ha: our hba card information @@ -580,9 +587,11 @@ static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) { u32 dev; for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { - if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) { - pm8001_ha->devices[dev].id = dev; - return &pm8001_ha->devices[dev]; + struct pm8001_device *pm8001_dev = &pm8001_ha->devices[dev]; + + if (pm8001_dev->dev_type == SAS_PHY_UNUSED) { + pm8001_init_dev(pm8001_dev, dev); + return pm8001_dev; } } if (dev == PM8001_MAX_DEVICES) { @@ -613,9 +622,7 @@ struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, void pm8001_free_dev(struct pm8001_device *pm8001_dev) { - u32 id = pm8001_dev->id; memset(pm8001_dev, 0, sizeof(*pm8001_dev)); - pm8001_dev->id = id; 
pm8001_dev->dev_type = SAS_PHY_UNUSED; pm8001_dev->device_id = PM8001_MAX_DEVICES; pm8001_dev->sas_device = NULL; diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index ced6721380a8..42c7b3f7afbf 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -96,6 +96,8 @@ extern struct list_head hba_list; extern const struct pm8001_dispatch pm8001_8001_dispatch; extern const struct pm8001_dispatch pm8001_80xx_dispatch; +extern uint pcs_event_log_severity; + struct pm8001_hba_info; struct pm8001_ccb_info; struct pm8001_device; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index a9869cd8c4c0..e65951dd2024 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -763,7 +763,8 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->memoryMap.region[IOP].phys_addr_lo; pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size = PM8001_EVENT_LOG_SIZE; - pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = + pcs_event_log_severity; pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; /* Enable higher IQs and OQs, 32 to 63, bit 16 */ diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index cf13148ba281..d2f47dc31dbf 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2738,6 +2738,7 @@ static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf, sb_id, QED_SB_TYPE_STORAGE); if (ret) { + dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys); QEDF_ERR(&qedf->dbg_ctx, "Status block initialization failed (0x%x) for id = %d.\n", ret, sb_id); @@ -4018,11 +4019,6 @@ void qedf_stag_change_work(struct work_struct *work) struct qedf_ctx *qedf = container_of(work, struct qedf_ctx, stag_work.work); - if (!qedf) { - QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL"); - return; - } - if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) { QEDF_ERR(&qedf->dbg_ctx, "Already is in recovery, hence not calling software context reset.\n"); diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index c5aec26019d6..628d59dda20c 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -369,6 +369,7 @@ static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi, ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys, sb_id, QED_SB_TYPE_STORAGE); if (ret) { + dma_free_coherent(&qedi->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys); QEDI_ERR(&qedi->dbg_ctx, "Status block initialization failed for id = %d.\n", sb_id); diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 6177f4798f3a..74866b9f2b14 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -1463,7 +1463,7 @@ static struct platform_driver qpti_sbus_driver = { .of_match_table = qpti_match, }, .probe = qpti_sbus_probe, - .remove_new = qpti_sbus_remove, + .remove = qpti_sbus_remove, }; module_platform_driver(qpti_sbus_driver); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index d95f417e24c0..9be2a6a00530 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3651,7 +3651,7 @@ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp, enum dma_data_direction dir; struct scsi_data_buffer *sdb = &scp->sdb; u8 *fsp; - int i; + int i, total = 0; /* * Even though reads are inherently atomic (in this driver), we expect @@ -3688,18 +3688,16 @@ 
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp, fsp + (block * sdebug_sector_size), sdebug_sector_size, sg_skip, do_write); sdeb_data_sector_unlock(sip, do_write); - if (ret != sdebug_sector_size) { - ret += (i * sdebug_sector_size); + total += ret; + if (ret != sdebug_sector_size) break; - } sg_skip += sdebug_sector_size; if (++block >= sdebug_store_sectors) block = 0; } - ret = num * sdebug_sector_size; sdeb_data_unlock(sip, atomic); - return ret; + return total; } /* Returns number of bytes copied or -1 if error. */ diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index c8b9654d30f0..a4d17f3da25d 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -188,8 +188,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); while (bufsize >= SECTOR_SIZE) { - buf = __vmalloc(bufsize, - GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY); + buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY); if (buf) { *buflen = bufsize; return buf; diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index f86be197fedd..84334ab39c81 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -307,10 +307,6 @@ sg_open(struct inode *inode, struct file *filp) if (retval) goto sg_put; - retval = scsi_autopm_get_device(device); - if (retval) - goto sdp_put; - /* scsi_block_when_processing_errors() may block so bypass * check if O_NONBLOCK. Permits SCSI commands to be issued * during error recovery. Tread carefully. */ @@ -318,7 +314,7 @@ sg_open(struct inode *inode, struct file *filp) scsi_block_when_processing_errors(device))) { retval = -ENXIO; /* we are in error recovery for this device */ - goto error_out; + goto sdp_put; } mutex_lock(&sdp->open_rel_lock); @@ -371,8 +367,6 @@ sg_open(struct inode *inode, struct file *filp) } error_mutex_locked: mutex_unlock(&sdp->open_rel_lock); -error_out: - scsi_autopm_put_device(device); sdp_put: kref_put(&sdp->d_ref, sg_device_destroy); scsi_device_put(device); @@ -392,7 +386,6 @@ sg_release(struct inode *inode, struct file *filp) SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n")); mutex_lock(&sdp->open_rel_lock); - scsi_autopm_put_device(sdp->device); kref_put(&sfp->f_ref, sg_remove_sfp); sdp->open_cnt--; diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c index b0bef83db7b6..6594661db5f4 100644 --- a/drivers/scsi/sgiwd93.c +++ b/drivers/scsi/sgiwd93.c @@ -306,7 +306,7 @@ static void sgiwd93_remove(struct platform_device *pdev) static struct platform_driver sgiwd93_driver = { .probe = sgiwd93_probe, - .remove_new = sgiwd93_remove, + .remove = sgiwd93_remove, .driver = { .name = "sgiwd93", } diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c index 9df1c90a24c1..d1d2556c8fc4 100644 --- a/drivers/scsi/sni_53c710.c +++ b/drivers/scsi/sni_53c710.c @@ -119,7 +119,7 @@ static void snirm710_driver_remove(struct platform_device *dev) static struct platform_driver snirm710_driver = { .probe = snirm710_probe, - .remove_new = snirm710_driver_remove, + .remove = snirm710_driver_remove, .driver = { .name = "snirm_53c710", }, diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index d50bad3a2ce9..fe0618ac9769 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -991,7 +991,10 @@ static int test_ready(struct scsi_tape *STp, int do_wait) scode = cmdstatp->sense_hdr.sense_key; if (scode == UNIT_ATTENTION) { /* New media? 
*/ - new_session = 1; + if (cmdstatp->sense_hdr.asc == 0x28) { /* New media */ + new_session = 1; + DEBC_printk(STp, "New tape session."); + } if (attentions < MAX_ATTENTIONS) { attentions++; continue; @@ -3506,6 +3509,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) int i, cmd_nr, cmd_type, bt; int retval = 0; unsigned int blk; + bool cmd_mtiocget; struct scsi_tape *STp = file->private_data; struct st_modedef *STm; struct st_partstat *STps; @@ -3619,6 +3623,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) */ if (mtc.mt_op != MTREW && mtc.mt_op != MTOFFL && + mtc.mt_op != MTLOAD && mtc.mt_op != MTRETEN && mtc.mt_op != MTERASE && mtc.mt_op != MTSEEK && @@ -3732,17 +3737,28 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) goto out; } + cmd_mtiocget = cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET); + if ((i = flush_buffer(STp, 0)) < 0) { - retval = i; - goto out; - } - if (STp->can_partitions && - (i = switch_partition(STp)) < 0) { - retval = i; - goto out; + if (cmd_mtiocget && STp->pos_unknown) { + /* flush fails -> modify status accordingly */ + reset_state(STp); + STp->pos_unknown = 1; + } else { /* return error */ + retval = i; + goto out; + } + } else { /* flush_buffer succeeds */ + if (STp->can_partitions) { + i = switch_partition(STp); + if (i < 0) { + retval = i; + goto out; + } + } } - if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) { + if (cmd_mtiocget) { struct mtget mt_status; if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) { @@ -3756,7 +3772,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) ((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK); mt_status.mt_blkno = STps->drv_block; mt_status.mt_fileno = STps->drv_file; - if (STp->block_size != 0) { + if (STp->block_size != 0 && mt_status.mt_blkno >= 0) { if (STps->rw == ST_WRITING) mt_status.mt_blkno += (STp->buffer)->buffer_bytes / STp->block_size; diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index fffc0fa52594..ca9cd691cc32 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -656,8 +656,14 @@ static void __exit sun3_scsi_remove(struct platform_device *pdev) iounmap(ioaddr); } -static struct platform_driver sun3_scsi_driver = { - .remove_new = __exit_p(sun3_scsi_remove), +/* + * sun3_scsi_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound at + * runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. 
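Two mechanical themes run through the platform-driver hunks: the void-returning remove callback is back under the plain .remove name (so .remove_new is renamed), and drivers whose remove handler lives in .exit.text keep the __refdata annotation so modpost does not flag the cross-section reference. A schematic driver showing both, using made-up names (exdrv):

#include <linux/module.h>
#include <linux/platform_device.h>

static int __init exdrv_probe(struct platform_device *pdev)
{
	return 0;
}

static void __exit exdrv_remove(struct platform_device *pdev)
{
	/* release resources acquired in probe */
}

/*
 * remove() is in .exit.text. With module_platform_driver_probe() the
 * device can never be unbound at runtime, so __refdata silences the
 * section mismatch warning modpost would otherwise emit.
 */
static struct platform_driver exdrv_driver __refdata = {
	.remove = __exit_p(exdrv_remove),
	.driver = {
		.name = "exdrv",
	},
};
module_platform_driver_probe(exdrv_driver, exdrv_probe);

MODULE_LICENSE("GPL");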
+ */ +static struct platform_driver sun3_scsi_driver __refdata = { + .remove = __exit_p(sun3_scsi_remove), .driver = { .name = DRV_MODULE_NAME, }, diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index e20f314cf3e7..365406885b8e 100644 --- a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c @@ -265,7 +265,7 @@ static void esp_sun3x_remove(struct platform_device *dev) static struct platform_driver esp_sun3x_driver = { .probe = esp_sun3x_probe, - .remove_new = esp_sun3x_remove, + .remove = esp_sun3x_remove, .driver = { .name = "sun3x_esp", }, diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c index 5ce6c9d19d1e..aa430501f0c7 100644 --- a/drivers/scsi/sun_esp.c +++ b/drivers/scsi/sun_esp.c @@ -603,7 +603,7 @@ static struct platform_driver esp_sbus_driver = { .of_match_table = esp_match, }, .probe = esp_sbus_probe, - .remove_new = esp_sbus_remove, + .remove = esp_sbus_remove, }; module_platform_driver(esp_sbus_driver); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index bf4892544cfd..bb84d304b07e 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -691,7 +691,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL); if (!dev->queues) { - dev->transport->free_device(dev); + hba->backend->ops->free_device(dev); return NULL; } diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index f98ebb18666b..da7017113f92 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -369,7 +369,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) bdev_file = bdev_file_open_by_path(dev->udev_path, BLK_OPEN_WRITE | BLK_OPEN_READ, pdv, NULL); if (IS_ERR(bdev_file)) { - pr_err("pSCSI: bdev_open_by_path() failed\n"); + pr_err("pSCSI: bdev_file_open_by_path() failed\n"); scsi_device_put(sd); return PTR_ERR(bdev_file); } diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 5891cdacd0b3..24c10e62cef9 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -417,13 +417,6 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) } EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational); -void ufshcd_mcq_enable_esi(struct ufs_hba *hba) -{ - ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2, - REG_UFS_MEM_CFG); -} -EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi); - void ufshcd_mcq_enable(struct ufs_hba *hba) { ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG); @@ -437,6 +430,13 @@ void ufshcd_mcq_disable(struct ufs_hba *hba) hba->mcq_enabled = false; } +void ufshcd_mcq_enable_esi(struct ufs_hba *hba) +{ + ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2, + REG_UFS_MEM_CFG); +} +EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi); + void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg) { ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA); @@ -569,17 +569,22 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id); writel(nexus, opr_sqd_base + REG_SQCTI); - /* SQRTCy.ICU = 1 */ - writel(SQ_ICU, opr_sqd_base + REG_SQRTC); + /* Initiate Cleanup */ + writel(readl(opr_sqd_base + REG_SQRTC) | SQ_ICU, + opr_sqd_base + REG_SQRTC); - /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */ + /* Wait until SQRTSy.CUS = 1. Report SQRTSy.RTC. 
*/ reg = opr_sqd_base + REG_SQRTS; err = read_poll_timeout(readl, val, val & SQ_CUS, 20, MCQ_POLL_US, false, reg); if (err) - dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n", - __func__, id, task_tag, - FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg))); + dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d\n", + __func__, id, task_tag, err); + else + dev_info(hba->dev, + "%s, hwq %d: cleanup return code (RTC) %ld\n", + __func__, id, + FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg))); if (ufshcd_mcq_sq_start(hba, hwq)) err = -ETIMEDOUT; diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 6a71ebf953e2..8135a6dad7c6 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -298,6 +298,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba); static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); static void ufshcd_hba_exit(struct ufs_hba *hba); +static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params); static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params); static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); @@ -349,18 +350,6 @@ static void ufshcd_configure_wb(struct ufs_hba *hba) ufshcd_wb_toggle_buf_flush(hba, true); } -static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba) -{ - if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt)) - scsi_unblock_requests(hba->host); -} - -static void ufshcd_scsi_block_requests(struct ufs_hba *hba) -{ - if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1) - scsi_block_requests(hba->host); -} - static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, enum ufs_trace_str_t str_t) { @@ -739,25 +728,15 @@ EXPORT_SYMBOL_GPL(ufshcd_delay_us); * Return: -ETIMEDOUT on error, zero on success. 
*/ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, - u32 val, unsigned long interval_us, - unsigned long timeout_ms) + u32 val, unsigned long interval_us, + unsigned long timeout_ms) { - int err = 0; - unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); - - /* ignore bits that we don't intend to wait on */ - val = val & mask; + u32 v; - while ((ufshcd_readl(hba, reg) & mask) != val) { - usleep_range(interval_us, interval_us + 50); - if (time_after(jiffies, timeout)) { - if ((ufshcd_readl(hba, reg) & mask) != val) - err = -ETIMEDOUT; - break; - } - } + val &= mask; /* ignore bits that we don't intend to wait on */ - return err; + return read_poll_timeout(ufshcd_readl, v, (v & mask) == val, + interval_us, timeout_ms * 1000, false, hba, reg); } /** @@ -1255,11 +1234,13 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, static u32 ufshcd_pending_cmds(struct ufs_hba *hba) { const struct scsi_device *sdev; + unsigned long flags; u32 pending = 0; - lockdep_assert_held(hba->host->host_lock); + spin_lock_irqsave(hba->host->host_lock, flags); __shost_for_each_device(sdev, hba->host) pending += sbitmap_weight(&sdev->budget_map); + spin_unlock_irqrestore(hba->host->host_lock, flags); return pending; } @@ -1273,7 +1254,6 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba) static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us) { - unsigned long flags; int ret = 0; u32 tm_doorbell; u32 tr_pending; @@ -1281,7 +1261,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, ktime_t start; ufshcd_hold(hba); - spin_lock_irqsave(hba->host->host_lock, flags); /* * Wait for all the outstanding tasks/transfer requests. * Verify by checking the doorbell registers are clear. @@ -1302,7 +1281,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, break; } - spin_unlock_irqrestore(hba->host->host_lock, flags); io_schedule_timeout(msecs_to_jiffies(20)); if (ktime_to_us(ktime_sub(ktime_get(), start)) > wait_timeout_us) { @@ -1314,7 +1292,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, */ do_last_check = true; } - spin_lock_irqsave(hba->host->host_lock, flags); } while (tm_doorbell || tr_pending); if (timeout) { @@ -1324,7 +1301,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, ret = -EBUSY; } out: - spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_release(hba); return ret; } @@ -2411,8 +2387,6 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) int err; hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); - if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) - hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; /* nutrs and nutmrs are 0 based values */ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1; @@ -2551,13 +2525,11 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result * @hba: per adapter instance * @uic_cmd: UIC command - * @completion: initialize the completion only if this is set to true * * Return: 0 only if success. 
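Both the MCQ SQ cleanup and the rewritten ufshcd_wait_for_register() above lean on read_poll_timeout() from <linux/iopoll.h> instead of a hand-rolled sleep-and-compare loop. A stripped-down sketch of the same idiom against an imaginary status register (EX_REG_STATUS and EX_READY are placeholders, not UFSHCI definitions):

#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/bits.h>
#include <linux/errno.h>

#define EX_REG_STATUS	0x10
#define EX_READY	BIT(0)

static int ex_wait_ready(void __iomem *base)
{
	u32 val;

	/*
	 * Poll every 100 us, give up after 50 ms. Returns 0 once the
	 * condition is true and -ETIMEDOUT otherwise, which matches the
	 * -ETIMEDOUT contract documented for ufshcd_wait_for_register().
	 */
	return read_poll_timeout(readl, val, val & EX_READY,
				 100, 50 * 1000, false,
				 base + EX_REG_STATUS);
}

Centralising the timeout handling in the helper is what lets the caller collapse to a single return statement in the hunk above.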
*/ static int -__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, - bool completion) +__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { lockdep_assert_held(&hba->uic_cmd_mutex); @@ -2567,8 +2539,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, return -EIO; } - if (completion) - init_completion(&uic_cmd->done); + init_completion(&uic_cmd->done); uic_cmd->cmd_active = 1; ufshcd_dispatch_uic_cmd(hba, uic_cmd); @@ -2594,7 +2565,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) mutex_lock(&hba->uic_cmd_mutex); ufshcd_add_delay_before_dme_cmd(hba); - ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); + ret = __ufshcd_send_uic_cmd(hba, uic_cmd); if (!ret) ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); @@ -2775,7 +2746,6 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags) ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length); cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE); - memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len); memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); @@ -2878,6 +2848,26 @@ static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); } +static void __ufshcd_setup_cmd(struct ufshcd_lrb *lrbp, struct scsi_cmnd *cmd, u8 lun, int tag) +{ + memset(lrbp->ucd_req_ptr, 0, sizeof(*lrbp->ucd_req_ptr)); + + lrbp->cmd = cmd; + lrbp->task_tag = tag; + lrbp->lun = lun; + ufshcd_prepare_lrbp_crypto(cmd ? scsi_cmd_to_rq(cmd) : NULL, lrbp); +} + +static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, + struct scsi_cmnd *cmd, u8 lun, int tag) +{ + __ufshcd_setup_cmd(lrbp, cmd, lun, tag); + lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); + lrbp->req_abort_skip = false; + + ufshcd_comp_scsi_upiu(hba, lrbp); +} + /** * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID * @upiu_wlun_id: UPIU W-LUN id @@ -3010,16 +3000,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) ufshcd_hold(hba); lrbp = &hba->lrb[tag]; - lrbp->cmd = cmd; - lrbp->task_tag = tag; - lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); - lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); - - ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp); - lrbp->req_abort_skip = false; - - ufshcd_comp_scsi_upiu(hba, lrbp); + ufshcd_setup_scsi_cmd(hba, lrbp, cmd, ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag); err = ufshcd_map_sg(hba, lrbp); if (err) { @@ -3047,11 +3029,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, u8 lun, int tag) { - lrbp->cmd = NULL; - lrbp->task_tag = tag; - lrbp->lun = lun; + __ufshcd_setup_cmd(lrbp, NULL, lun, tag); lrbp->intr_cmd = true; /* No interrupt aggregation */ - ufshcd_prepare_lrbp_crypto(NULL, lrbp); hba->dev_cmd.type = cmd_type; } @@ -3083,7 +3062,6 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd) static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) { u32 mask; - unsigned long flags; int err; if (hba->mcq_enabled) { @@ -3103,9 +3081,7 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) mask = 1U << task_tag; /* clear outstanding transaction before retry */ - spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_utrl_clear(hba, mask); - spin_unlock_irqrestore(hba->host->host_lock, 
flags); /* * wait for h/w to clear corresponding bit in door-bell. @@ -4288,7 +4264,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) reenable_intr = true; } spin_unlock_irqrestore(hba->host->host_lock, flags); - ret = __ufshcd_send_uic_cmd(hba, cmd, false); + ret = __ufshcd_send_uic_cmd(hba, cmd); if (ret) { dev_err(hba->dev, "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", @@ -4539,6 +4515,14 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) return -EINVAL; } + if (pwr_info->lane_rx != pwr_info->lane_tx) { + dev_err(hba->dev, "%s: asymmetric connected lanes. rx=%d, tx=%d\n", + __func__, + pwr_info->lane_rx, + pwr_info->lane_tx); + return -EINVAL; + } + /* * First, get the maximum gears of HS speed. * If a zero value, it means there is no HSGEAR capability. @@ -4822,51 +4806,44 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_stop); */ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) { - int retry_outer = 3; - int retry_inner; + int retry; -start: - if (ufshcd_is_hba_active(hba)) - /* change controller state to "reset state" */ - ufshcd_hba_stop(hba); + for (retry = 3; retry > 0; retry--) { + if (ufshcd_is_hba_active(hba)) + /* change controller state to "reset state" */ + ufshcd_hba_stop(hba); - /* UniPro link is disabled at this point */ - ufshcd_set_link_off(hba); + /* UniPro link is disabled at this point */ + ufshcd_set_link_off(hba); - ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); + ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); - /* start controller initialization sequence */ - ufshcd_hba_start(hba); + /* start controller initialization sequence */ + ufshcd_hba_start(hba); - /* - * To initialize a UFS host controller HCE bit must be set to 1. - * During initialization the HCE bit value changes from 1->0->1. - * When the host controller completes initialization sequence - * it sets the value of HCE bit to 1. The same HCE bit is read back - * to check if the controller has completed initialization sequence. - * So without this delay the value HCE = 1, set in the previous - * instruction might be read back. - * This delay can be changed based on the controller. - */ - ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); + /* + * To initialize a UFS host controller HCE bit must be set to 1. + * During initialization the HCE bit value changes from 1->0->1. + * When the host controller completes initialization sequence + * it sets the value of HCE bit to 1. The same HCE bit is read back + * to check if the controller has completed initialization sequence. + * So without this delay the value HCE = 1, set in the previous + * instruction might be read back. + * This delay can be changed based on the controller. 
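The unconditional init_completion() above follows the usual completion handshake between a submission path and an interrupt handler; a stripped-down sketch of that handshake (the demo_* names are hypothetical):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct demo_cmd {
        struct completion done;
        int result;
};

/* Submission path: arm the completion before the hardware can complete. */
static void demo_send(struct demo_cmd *cmd)
{
        init_completion(&cmd->done);
        /* ... write the command to the controller here ... */
}

/* Interrupt path: publish the result, then wake the waiter. */
static void demo_complete(struct demo_cmd *cmd, int result)
{
        cmd->result = result;
        complete(&cmd->done);
}

/* Waiter: returns -ETIMEDOUT if no completion arrived within @timeout_ms. */
static int demo_wait(struct demo_cmd *cmd, unsigned long timeout_ms)
{
        if (!wait_for_completion_timeout(&cmd->done,
                                         msecs_to_jiffies(timeout_ms)))
                return -ETIMEDOUT;
        return cmd->result;
}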
+ */ + ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); - /* wait for the host controller to complete initialization */ - retry_inner = 50; - while (!ufshcd_is_hba_active(hba)) { - if (retry_inner) { - retry_inner--; - } else { - dev_err(hba->dev, - "Controller enable failed\n"); - if (retry_outer) { - retry_outer--; - goto start; - } - return -EIO; - } - usleep_range(1000, 1100); + /* wait for the host controller to complete initialization */ + if (!ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE, + CONTROLLER_ENABLE, 1000, 50)) + break; + + dev_err(hba->dev, "Enabling the controller failed\n"); } + if (!retry) + return -EIO; + /* enable UIC related interrupts */ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); @@ -5258,6 +5235,9 @@ static int ufshcd_device_configure(struct scsi_device *sdev, */ sdev->silence_suspend = 1; + if (hba->vops && hba->vops->config_scsi_dev) + hba->vops->config_scsi_dev(sdev); + ufshcd_crypto_register(hba, q); return 0; @@ -5416,10 +5396,12 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, } break; case OCS_ABORTED: - result |= DID_ABORT << 16; - break; case OCS_INVALID_COMMAND_STATUS: result |= DID_REQUEUE << 16; + dev_warn(hba->dev, + "OCS %s from controller for tag %d\n", + (ocs == OCS_ABORTED ? "aborted" : "invalid"), + lrbp->task_tag); break; case OCS_INVALID_CMD_TABLE_ATTR: case OCS_INVALID_PRDT_ATTR: @@ -5476,32 +5458,37 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) { irqreturn_t retval = IRQ_NONE; + struct uic_command *cmd; spin_lock(hba->host->host_lock); + cmd = hba->active_uic_cmd; + if (WARN_ON_ONCE(!cmd)) + goto unlock; + if (ufshcd_is_auto_hibern8_error(hba, intr_status)) hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); - if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { - hba->active_uic_cmd->argument2 |= - ufshcd_get_uic_cmd_result(hba); - hba->active_uic_cmd->argument3 = - ufshcd_get_dme_attr_val(hba); + if (intr_status & UIC_COMMAND_COMPL) { + cmd->argument2 |= ufshcd_get_uic_cmd_result(hba); + cmd->argument3 = ufshcd_get_dme_attr_val(hba); if (!hba->uic_async_done) - hba->active_uic_cmd->cmd_active = 0; - complete(&hba->active_uic_cmd->done); + cmd->cmd_active = 0; + complete(&cmd->done); retval = IRQ_HANDLED; } - if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { - hba->active_uic_cmd->cmd_active = 0; + if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) { + cmd->cmd_active = 0; complete(hba->uic_async_done); retval = IRQ_HANDLED; } if (retval == IRQ_HANDLED) - ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, - UFS_CMD_COMP); + ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP); + +unlock: spin_unlock(hba->host->host_lock); + return retval; } @@ -6194,12 +6181,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work) u32 status = 0; hba = container_of(work, struct ufs_hba, eeh_work); - ufshcd_scsi_block_requests(hba); err = ufshcd_get_ee_status(hba, &status); if (err) { dev_err(hba->dev, "%s: failed to get exception status %d\n", __func__, err); - goto out; + return; } trace_ufshcd_exception_event(dev_name(hba->dev), status); @@ -6211,8 +6197,6 @@ static void ufshcd_exception_event_handler(struct work_struct *work) ufshcd_temp_exception_event_handler(hba, status); ufs_debugfs_exception_event(hba, status); -out: - ufshcd_scsi_unblock_requests(hba); } /* Complete requests that have door-bell cleared */ @@ -6378,15 +6362,14 @@ static 
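The new config_scsi_dev hook call above gives variant drivers a per-LU configuration point; a hypothetical implementation might look like the sketch below (the myufs_* names are made up; the ufs-mediatek hunk later in this series uses the same shape).

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <ufs/ufshcd.h>

/*
 * Hypothetical variant hook: invoked from ufshcd_device_configure() for
 * every LU once the generic per-device setup has run.
 */
static void myufs_config_scsi_dev(struct scsi_device *sdev)
{
        struct ufs_hba *hba = shost_priv(sdev->host);

        dev_dbg(hba->dev, "configuring lun %llu\n", sdev->lun);
        /* per-LU tweaks (request-queue flags, limits, ...) would go here */
}

static const struct ufs_hba_variant_ops myufs_vops = {
        .name            = "myufs",
        .config_scsi_dev = myufs_config_scsi_dev,
};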
void ufshcd_err_handling_prepare(struct ufs_hba *hba) ufshcd_suspend_clkscaling(hba); ufshcd_clk_scaling_allow(hba, false); } - ufshcd_scsi_block_requests(hba); /* Wait for ongoing ufshcd_queuecommand() calls to finish. */ - blk_mq_wait_quiesce_done(&hba->host->tag_set); + blk_mq_quiesce_tagset(&hba->host->tag_set); cancel_work_sync(&hba->eeh_work); } static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) { - ufshcd_scsi_unblock_requests(hba); + blk_mq_unquiesce_tagset(&hba->host->tag_set); ufshcd_release(hba); if (ufshcd_is_clkscaling_supported(hba)) ufshcd_clk_scaling_suspend(hba, false); @@ -6465,26 +6448,12 @@ static bool ufshcd_abort_one(struct request *rq, void *priv) struct scsi_device *sdev = cmd->device; struct Scsi_Host *shost = sdev->host; struct ufs_hba *hba = shost_priv(shost); - struct ufshcd_lrb *lrbp = &hba->lrb[tag]; - struct ufs_hw_queue *hwq; - unsigned long flags; *ret = ufshcd_try_to_abort_task(hba, tag); dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag, hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1, *ret ? "failed" : "succeeded"); - /* Release cmd in MCQ mode if abort succeeds */ - if (hba->mcq_enabled && (*ret == 0)) { - hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd)); - if (!hwq) - return 0; - spin_lock_irqsave(&hwq->cq_lock, flags); - if (ufshcd_cmd_inflight(lrbp->cmd)) - ufshcd_release_scsi_cmd(hba, lrbp); - spin_unlock_irqrestore(&hwq->cq_lock, flags); - } - return *ret == 0; } @@ -7014,14 +6983,11 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) { int err = 0; u32 mask = 1 << tag; - unsigned long flags; if (!test_bit(tag, &hba->outstanding_tasks)) goto out; - spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_utmrl_clear(hba, tag); - spin_unlock_irqrestore(hba->host->host_lock, flags); /* poll for max. 1 sec to clear door bell register by h/w */ err = ufshcd_wait_for_register(hba, @@ -7064,12 +7030,13 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); - /* send command to the controller */ __set_bit(task_tag, &hba->outstanding_tasks); - ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); spin_unlock_irqrestore(host->host_lock, flags); + /* send command to the controller */ + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); + ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND); /* wait until the task management command is completed */ @@ -7485,10 +7452,9 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) { struct ufshcd_lrb *lrbp = &hba->lrb[tag]; - int err = 0; + int err; int poll_cnt; u8 resp = 0xF; - u32 reg; for (poll_cnt = 100; poll_cnt; poll_cnt--) { err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, @@ -7503,46 +7469,27 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) * cmd not pending in the device, check if it is * in transition. */ - dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", + dev_info( + hba->dev, + "%s: cmd with tag %d not pending in the device.\n", __func__, tag); - if (hba->mcq_enabled) { - /* MCQ mode */ - if (ufshcd_cmd_inflight(lrbp->cmd)) { - /* sleep for max. 
200us same delay as in SDB mode */ - usleep_range(100, 200); - continue; - } - /* command completed already */ - dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n", - __func__, tag); - goto out; - } - - /* Single Doorbell Mode */ - reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - if (reg & (1 << tag)) { - /* sleep for max. 200us to stabilize */ - usleep_range(100, 200); - continue; + if (!ufshcd_cmd_inflight(lrbp->cmd)) { + dev_info(hba->dev, + "%s: cmd with tag=%d completed.\n", + __func__, tag); + return 0; } - /* command completed already */ - dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", - __func__, tag); - goto out; + usleep_range(100, 200); } else { dev_err(hba->dev, "%s: no response from device. tag = %d, err %d\n", __func__, tag, err); - if (!err) - err = resp; /* service response error */ - goto out; + return err ? : resp; } } - if (!poll_cnt) { - err = -EBUSY; - goto out; - } + if (!poll_cnt) + return -EBUSY; err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, UFS_ABORT_TASK, &resp); @@ -7552,7 +7499,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) dev_err(hba->dev, "%s: issued. tag = %d, err %d\n", __func__, tag, err); } - goto out; + return err; } err = ufshcd_clear_cmd(hba, tag); @@ -7560,7 +7507,6 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", __func__, tag, err); -out: return err; } @@ -7683,6 +7629,29 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) return err; } +/** + * ufshcd_process_probe_result - Process the ufshcd_probe_hba() result. + * @hba: UFS host controller instance. + * @probe_start: time when the ufshcd_probe_hba() call started. + * @ret: ufshcd_probe_hba() return value. + */ +static void ufshcd_process_probe_result(struct ufs_hba *hba, + ktime_t probe_start, int ret) +{ + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (ret) + hba->ufshcd_state = UFSHCD_STATE_ERROR; + else if (hba->ufshcd_state == UFSHCD_STATE_RESET) + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + trace_ufshcd_init(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), probe_start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); +} + /** * ufshcd_host_reset_and_restore - reset and restore host controller * @hba: per-adapter instance @@ -7712,8 +7681,14 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) err = ufshcd_hba_enable(hba); /* Establish the link again and restore the device */ - if (!err) - err = ufshcd_probe_hba(hba, false); + if (!err) { + ktime_t probe_start = ktime_get(); + + err = ufshcd_device_init(hba, /*init_dev_params=*/false); + if (!err) + err = ufshcd_probe_hba(hba, false); + ufshcd_process_probe_result(hba, probe_start, err); + } if (err) dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); @@ -8231,7 +8206,7 @@ static void ufshcd_update_rtc(struct ufs_hba *hba) err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED, 0, 0, &val); - ufshcd_rpm_put_sync(hba); + ufshcd_rpm_put(hba); if (err) dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err); @@ -8648,6 +8623,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba) ufshcd_init_clk_scaling_sysfs(hba); } + /* + * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev + * pointer and hence must only be started after the WLUN pointer has + * been initialized by ufshcd_scsi_add_wlus(). 
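blk_mq_quiesce_tagset()/blk_mq_unquiesce_tagset(), as used in the error-handling prepare/unprepare hunk above, gate dispatch for every request queue that shares the host tag set; an illustrative pairing (demo_recover() is not part of the patch):

#include <linux/blk-mq.h>
#include <scsi/scsi_host.h>

/*
 * Illustrative only: stop new ->queue_rq() calls on every request queue
 * that shares the SCSI host's tag set, run recovery, then resume.
 */
static void demo_recover(struct Scsi_Host *shost)
{
        blk_mq_quiesce_tagset(&shost->tag_set);

        /* error handling runs here with no ufshcd_queuecommand() in flight */

        blk_mq_unquiesce_tagset(&shost->tag_set);
}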
+ */ + schedule_delayed_work(&hba->ufs_rtc_update_work, + msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS)); + ufs_bsg_probe(hba); scsi_scan_host(hba->host); @@ -8731,10 +8714,43 @@ static void ufshcd_config_mcq(struct ufs_hba *hba) hba->nutrs); } +static int ufshcd_post_device_init(struct ufs_hba *hba) +{ + int ret; + + ufshcd_tune_unipro_params(hba); + + /* UFS device is also active now */ + ufshcd_set_ufs_dev_active(hba); + ufshcd_force_reset_auto_bkops(hba); + + ufshcd_set_timestamp_attr(hba); + + if (!hba->max_pwr_info.is_valid) + return 0; + + /* + * Set the right value to bRefClkFreq before attempting to + * switch to HS gears. + */ + if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) + ufshcd_set_dev_ref_clk(hba); + /* Gear up to HS gear. */ + ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); + if (ret) { + dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", + __func__, ret); + return ret; + } + + return 0; +} + static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) { int ret; - struct Scsi_Host *host = hba->host; + + WARN_ON_ONCE(!hba->scsi_host_added); hba->ufshcd_state = UFSHCD_STATE_RESET; @@ -8775,58 +8791,14 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) ret = ufshcd_device_params_init(hba); if (ret) return ret; - if (is_mcq_supported(hba) && !hba->scsi_host_added) { - ufshcd_mcq_enable(hba); - ret = ufshcd_alloc_mcq(hba); - if (!ret) { - ufshcd_config_mcq(hba); - } else { - /* Continue with SDB mode */ - ufshcd_mcq_disable(hba); - use_mcq_mode = false; - dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", - ret); - } - ret = scsi_add_host(host, hba->dev); - if (ret) { - dev_err(hba->dev, "scsi_add_host failed\n"); - return ret; - } - hba->scsi_host_added = true; - } else if (is_mcq_supported(hba)) { - /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */ + if (is_mcq_supported(hba) && + hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) { ufshcd_config_mcq(hba); ufshcd_mcq_enable(hba); } } - ufshcd_tune_unipro_params(hba); - - /* UFS device is also active now */ - ufshcd_set_ufs_dev_active(hba); - ufshcd_force_reset_auto_bkops(hba); - - ufshcd_set_timestamp_attr(hba); - schedule_delayed_work(&hba->ufs_rtc_update_work, - msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS)); - - /* Gear up to HS gear if supported */ - if (hba->max_pwr_info.is_valid) { - /* - * Set the right value to bRefClkFreq before attempting to - * switch to HS gears. 
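The RTC work is now scheduled only after ufshcd_scsi_add_wlus() so the handler cannot run before the WLUN pointer exists; the self-rearming delayed-work shape being relied on looks roughly like this (the demo_* names are illustrative):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_INTERVAL_MS        1000

struct demo_dev {
        struct delayed_work periodic_work;
};

/* Self-rearming periodic handler, the same shape as ufs_rtc_update_work. */
static void demo_work_fn(struct work_struct *work)
{
        struct demo_dev *ddev = container_of(work, struct demo_dev,
                                             periodic_work.work);

        /* ... only touch state that was set up before the first schedule ... */

        schedule_delayed_work(&ddev->periodic_work,
                              msecs_to_jiffies(DEMO_INTERVAL_MS));
}

static void demo_start(struct demo_dev *ddev)
{
        INIT_DELAYED_WORK(&ddev->periodic_work, demo_work_fn);
        /* Schedule only after everything the handler dereferences is valid. */
        schedule_delayed_work(&ddev->periodic_work,
                              msecs_to_jiffies(DEMO_INTERVAL_MS));
}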
- */ - if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) - ufshcd_set_dev_ref_clk(hba); - ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); - if (ret) { - dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", - __func__, ret); - return ret; - } - } - - return 0; + return ufshcd_post_device_init(hba); } /** @@ -8840,14 +8812,8 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) */ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) { - ktime_t start = ktime_get(); - unsigned long flags; int ret; - ret = ufshcd_device_init(hba, init_dev_params); - if (ret) - goto out; - if (!hba->pm_op_in_progress && (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) { /* Reset the device and controller before doing reinit */ @@ -8860,13 +8826,13 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) dev_err(hba->dev, "Host controller enable failed\n"); ufshcd_print_evt_hist(hba); ufshcd_print_host_state(hba); - goto out; + return ret; } /* Reinit the device */ ret = ufshcd_device_init(hba, init_dev_params); if (ret) - goto out; + return ret; } ufshcd_print_pwr_info(hba); @@ -8886,18 +8852,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) ufshcd_write_ee_control(hba); ufshcd_configure_auto_hibern8(hba); -out: - spin_lock_irqsave(hba->host->host_lock, flags); - if (ret) - hba->ufshcd_state = UFSHCD_STATE_ERROR; - else if (hba->ufshcd_state == UFSHCD_STATE_RESET) - hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; - spin_unlock_irqrestore(hba->host->host_lock, flags); - - trace_ufshcd_init(dev_name(hba->dev), ret, - ktime_to_us(ktime_sub(ktime_get(), start)), - hba->curr_dev_pwr_mode, hba->uic_link_state); - return ret; + return 0; } /** @@ -8908,11 +8863,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) static void ufshcd_async_scan(void *data, async_cookie_t cookie) { struct ufs_hba *hba = (struct ufs_hba *)data; + ktime_t probe_start; int ret; down(&hba->host_sem); /* Initialize hba, detect and initialize UFS device */ + probe_start = ktime_get(); ret = ufshcd_probe_hba(hba, true); + ufshcd_process_probe_result(hba, probe_start, ret); up(&hba->host_sem); if (ret) goto out; @@ -10209,7 +10167,9 @@ static void ufshcd_wl_shutdown(struct device *dev) shost_for_each_device(sdev, hba->host) { if (sdev == hba->ufs_device_wlun) continue; - scsi_device_quiesce(sdev); + mutex_lock(&sdev->state_mutex); + scsi_device_set_state(sdev, SDEV_OFFLINE); + mutex_unlock(&sdev->state_mutex); } __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); @@ -10313,6 +10273,8 @@ EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); */ static int ufshcd_set_dma_mask(struct ufs_hba *hba) { + if (hba->vops && hba->vops->set_dma_mask) + return hba->vops->set_dma_mask(hba); if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) return 0; @@ -10376,6 +10338,74 @@ static const struct blk_mq_ops ufshcd_tmf_ops = { .queue_rq = ufshcd_queue_tmf, }; +static int ufshcd_add_scsi_host(struct ufs_hba *hba) +{ + int err; + + if (is_mcq_supported(hba)) { + ufshcd_mcq_enable(hba); + err = ufshcd_alloc_mcq(hba); + if (!err) { + ufshcd_config_mcq(hba); + } else { + /* Continue with SDB mode */ + ufshcd_mcq_disable(hba); + use_mcq_mode = false; + dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", + err); + } + } + if (!is_mcq_supported(hba) && !hba->lsdb_sup) { + dev_err(hba->dev, + "%s: failed to initialize (legacy doorbell mode not supported)\n", + __func__); + return -EINVAL; + } + + 
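ufshcd_set_dma_mask() now defers to a variant ->set_dma_mask() when one is provided; a hypothetical implementation for a controller limited to 32-bit DMA (the ufs-renesas hunk later in this patch uses this exact pattern):

#include <linux/dma-mapping.h>
#include <ufs/ufshcd.h>

/*
 * Hypothetical ->set_dma_mask() implementation for a controller whose DMA
 * engine only handles 32-bit addresses regardless of the 64AS capability
 * bit.
 */
static int myufs_set_dma_mask(struct ufs_hba *hba)
{
        return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}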
err = scsi_add_host(hba->host, hba->dev); + if (err) { + dev_err(hba->dev, "scsi_add_host failed\n"); + return err; + } + hba->scsi_host_added = true; + + hba->tmf_tag_set = (struct blk_mq_tag_set) { + .nr_hw_queues = 1, + .queue_depth = hba->nutmrs, + .ops = &ufshcd_tmf_ops, + .flags = BLK_MQ_F_NO_SCHED, + }; + err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); + if (err < 0) + goto remove_scsi_host; + hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL); + if (IS_ERR(hba->tmf_queue)) { + err = PTR_ERR(hba->tmf_queue); + goto free_tmf_tag_set; + } + hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, + sizeof(*hba->tmf_rqs), GFP_KERNEL); + if (!hba->tmf_rqs) { + err = -ENOMEM; + goto free_tmf_queue; + } + + return 0; + +free_tmf_queue: + blk_mq_destroy_queue(hba->tmf_queue); + blk_put_queue(hba->tmf_queue); + +free_tmf_tag_set: + blk_mq_free_tag_set(&hba->tmf_tag_set); + +remove_scsi_host: + if (hba->scsi_host_added) + scsi_remove_host(hba->host); + + return err; +} + /** * ufshcd_init - Driver initialization routine * @hba: per-adapter instance @@ -10507,42 +10537,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->is_irq_enabled = true; } - if (!is_mcq_supported(hba)) { - if (!hba->lsdb_sup) { - dev_err(hba->dev, "%s: failed to initialize (legacy doorbell mode not supported)\n", - __func__); - err = -EINVAL; - goto out_disable; - } - err = scsi_add_host(host, hba->dev); - if (err) { - dev_err(hba->dev, "scsi_add_host failed\n"); - goto out_disable; - } - hba->scsi_host_added = true; - } - - hba->tmf_tag_set = (struct blk_mq_tag_set) { - .nr_hw_queues = 1, - .queue_depth = hba->nutmrs, - .ops = &ufshcd_tmf_ops, - .flags = BLK_MQ_F_NO_SCHED, - }; - err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); - if (err < 0) - goto out_remove_scsi_host; - hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL); - if (IS_ERR(hba->tmf_queue)) { - err = PTR_ERR(hba->tmf_queue); - goto free_tmf_tag_set; - } - hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, - sizeof(*hba->tmf_rqs), GFP_KERNEL); - if (!hba->tmf_rqs) { - err = -ENOMEM; - goto free_tmf_queue; - } - /* Reset the attached device */ ufshcd_device_reset(hba); @@ -10554,7 +10548,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) dev_err(hba->dev, "Host controller enable failed\n"); ufshcd_print_evt_hist(hba); ufshcd_print_host_state(hba); - goto free_tmf_queue; + goto out_disable; } /* @@ -10580,7 +10574,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) /* Hold auto suspend until async scan completes */ pm_runtime_get_sync(dev); - atomic_set(&hba->scsi_block_reqs_cnt, 0); + /* * We are assuming that device wasn't put in sleep/power-down * state exclusively during the boot stage before kernel. 
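The TMF tag-set setup moved into ufshcd_add_scsi_host() keeps the usual allocate-forward/tear-down-in-reverse ordering; a condensed sketch of that blk-mq idiom with illustrative names:

#include <linux/blk-mq.h>
#include <linux/err.h>

static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                  const struct blk_mq_queue_data *bd)
{
        return BLK_STS_OK;      /* illustrative no-op */
}

static const struct blk_mq_ops demo_mq_ops = {
        .queue_rq = demo_queue_rq,
};

/*
 * Same shape as the TMF queue setup: allocate the tag set, then the queue;
 * undo in the opposite order if the second step fails.
 */
static struct request_queue *demo_alloc_queue(struct blk_mq_tag_set *set)
{
        struct request_queue *q;
        int err;

        *set = (struct blk_mq_tag_set) {
                .nr_hw_queues = 1,
                .queue_depth  = 8,
                .ops          = &demo_mq_ops,
        };
        err = blk_mq_alloc_tag_set(set);
        if (err)
                return ERR_PTR(err);

        q = blk_mq_alloc_queue(set, NULL, NULL);
        if (IS_ERR(q))
                blk_mq_free_tag_set(set);
        return q;
}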
@@ -10589,6 +10583,49 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) */ ufshcd_set_ufs_dev_active(hba); + /* Initialize hba, detect and initialize UFS device */ + ktime_t probe_start = ktime_get(); + + hba->ufshcd_state = UFSHCD_STATE_RESET; + + err = ufshcd_link_startup(hba); + if (err) + goto out_disable; + + if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) + goto initialized; + + /* Debug counters initialization */ + ufshcd_clear_dbg_ufs_stats(hba); + + /* UniPro link is active now */ + ufshcd_set_link_active(hba); + + /* Verify device initialization by sending NOP OUT UPIU */ + err = ufshcd_verify_dev_init(hba); + if (err) + goto out_disable; + + /* Initiate UFS initialization, and waiting until completion */ + err = ufshcd_complete_dev_init(hba); + if (err) + goto out_disable; + + err = ufshcd_device_params_init(hba); + if (err) + goto out_disable; + + err = ufshcd_post_device_init(hba); + +initialized: + ufshcd_process_probe_result(hba, probe_start, err); + if (err) + goto out_disable; + + err = ufshcd_add_scsi_host(hba); + if (err) + goto out_disable; + async_schedule(ufshcd_async_scan, hba); ufs_sysfs_add_nodes(hba->dev); @@ -10596,14 +10633,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ufshcd_pm_qos_init(hba); return 0; -free_tmf_queue: - blk_mq_destroy_queue(hba->tmf_queue); - blk_put_queue(hba->tmf_queue); -free_tmf_tag_set: - blk_mq_free_tag_set(&hba->tmf_tag_set); -out_remove_scsi_host: - if (hba->scsi_host_added) - scsi_remove_host(hba->host); out_disable: hba->is_irq_enabled = false; ufshcd_hba_exit(hba); diff --git a/drivers/ufs/host/tc-dwc-g210-pci.c b/drivers/ufs/host/tc-dwc-g210-pci.c index 876781fd6861..0167d8bef71a 100644 --- a/drivers/ufs/host/tc-dwc-g210-pci.c +++ b/drivers/ufs/host/tc-dwc-g210-pci.c @@ -80,14 +80,12 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); - err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD); - if (err < 0) { + mmio_base = pcim_iomap_region(pdev, 0, UFSHCD); + if (IS_ERR(mmio_base)) { dev_err(&pdev->dev, "request and iomap failed\n"); - return err; + return PTR_ERR(mmio_base); } - mmio_base = pcim_iomap_table(pdev)[0]; - err = ufshcd_alloc_host(&pdev->dev, &hba); if (err) { dev_err(&pdev->dev, "Allocation failed\n"); diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index 9ec318ef52bf..91827b3e582b 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -48,6 +48,8 @@ #define HCI_UNIPRO_APB_CLK_CTRL 0x68 #define UNIPRO_APB_CLK(v, x) (((v) & ~0xF) | ((x) & 0xF)) #define HCI_AXIDMA_RWDATA_BURST_LEN 0x6C +#define WLU_EN BIT(31) +#define WLU_BURST_LEN(x) ((x) << 27 | ((x) & 0xF)) #define HCI_GPIO_OUT 0x70 #define HCI_ERR_EN_PA_LAYER 0x78 #define HCI_ERR_EN_DL_LAYER 0x7C @@ -74,6 +76,10 @@ #define CLK_CTRL_EN_MASK (REFCLK_CTRL_EN |\ UNIPRO_PCLK_CTRL_EN |\ UNIPRO_MCLK_CTRL_EN) + +#define HCI_IOP_ACG_DISABLE 0x100 +#define HCI_IOP_ACG_DISABLE_EN BIT(0) + /* Device fatal error */ #define DFES_ERR_EN BIT(31) #define DFES_DEF_L2_ERRS (UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\ @@ -198,15 +204,8 @@ static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs) exynos_ufs_ctrl_clkstop(ufs, false); } -static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) -{ - return 0; -} - -static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) +static int exynos_ufs_shareability(struct exynos_ufs *ufs) { - struct exynos_ufs_uic_attr *attr = 
ufs->drv_data->uic_attr; - /* IO Coherency setting */ if (ufs->sysreg) { return regmap_update_bits(ufs->sysreg, @@ -214,11 +213,32 @@ static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) UFS_SHARABLE, UFS_SHARABLE); } - attr->tx_dif_p_nsec = 3200000; - return 0; } +static int gs101_ufs_drv_init(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + u32 reg; + + /* Enable WriteBooster */ + hba->caps |= UFSHCD_CAP_WB_EN; + + /* Enable clock gating and hibern8 */ + hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; + + /* set ACG to be controlled by UFS_ACG_DISABLE */ + reg = hci_readl(ufs, HCI_IOP_ACG_DISABLE); + hci_writel(ufs, reg & (~HCI_IOP_ACG_DISABLE_EN), HCI_IOP_ACG_DISABLE); + + return exynos_ufs_shareability(ufs); +} + +static int exynosauto_ufs_drv_init(struct exynos_ufs *ufs) +{ + return exynos_ufs_shareability(ufs); +} + static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs) { struct ufs_hba *hba = ufs->hba; @@ -546,6 +566,9 @@ static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs) struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg; + if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR) + return; + t_cfg->tx_linereset_p = exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec); t_cfg->tx_linereset_n = @@ -724,6 +747,9 @@ static void exynos_ufs_config_smu(struct exynos_ufs *ufs) { u32 reg, val; + if (ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE) + return; + exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val); /* make encryption disabled by default */ @@ -771,6 +797,21 @@ static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs, exynos_ufs_disable_ov_tm(hba); } +#define UFS_HW_VER_MAJOR_MASK GENMASK(15, 8) + +static u32 exynos_ufs_get_hs_gear(struct ufs_hba *hba) +{ + u8 major; + + major = FIELD_GET(UFS_HW_VER_MAJOR_MASK, hba->ufs_version); + + if (major >= 3) + return UFS_HS_G4; + + /* Default is HS-G3 */ + return UFS_HS_G3; +} + static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) @@ -788,6 +829,10 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba, ufshcd_init_host_params(&host_params); + /* This driver only support symmetric gear setting e.g. 
hs_tx_gear == hs_rx_gear */ + host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba); + host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba); + ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); if (ret) { pr_err("%s: failed to determine capabilities\n", __func__); @@ -1429,7 +1474,7 @@ static int exynos_ufs_init(struct ufs_hba *hba) exynos_ufs_fmp_init(hba, ufs); if (ufs->drv_data->drv_init) { - ret = ufs->drv_data->drv_init(dev, ufs); + ret = ufs->drv_data->drv_init(ufs); if (ret) { dev_err(dev, "failed to init drv-data\n"); goto out; @@ -1440,8 +1485,8 @@ static int exynos_ufs_init(struct ufs_hba *hba) if (ret) goto out; exynos_ufs_specify_phy_time_attr(ufs); - if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE)) - exynos_ufs_config_smu(ufs); + + exynos_ufs_config_smu(ufs); hba->host->dma_alignment = DATA_UNIT_SIZE - 1; return 0; @@ -1484,12 +1529,12 @@ static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba) hci_writel(ufs, 1 << 0, HCI_GPIO_OUT); } -static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter) +static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; - if (!enter) { + if (cmd == UIC_CMD_DME_HIBER_EXIT) { if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) exynos_ufs_disable_auto_ctrl_hcc(ufs); exynos_ufs_ungate_clks(ufs); @@ -1517,30 +1562,11 @@ static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter) } } -static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter) +static void exynos_ufs_post_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); - if (!enter) { - u32 cur_mode = 0; - u32 pwrmode; - - if (ufshcd_is_hs_mode(&ufs->dev_req_params)) - pwrmode = FAST_MODE; - else - pwrmode = SLOW_MODE; - - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode); - if (cur_mode != (pwrmode << 4 | pwrmode)) { - dev_warn(hba->dev, "%s: power mode change\n", __func__); - hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf; - hba->pwr_info.pwr_tx = cur_mode & 0xf; - ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); - } - - if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)) - exynos_ufs_establish_connt(ufs); - } else { + if (cmd == UIC_CMD_DME_HIBER_ENTER) { ufs->entry_hibern8_t = ktime_get(); exynos_ufs_gate_clks(ufs); if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) @@ -1627,15 +1653,15 @@ static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba, } static void exynos_ufs_hibern8_notify(struct ufs_hba *hba, - enum uic_cmd_dme enter, + enum uic_cmd_dme cmd, enum ufs_notify_change_status notify) { switch ((u8)notify) { case PRE_CHANGE: - exynos_ufs_pre_hibern8(hba, enter); + exynos_ufs_pre_hibern8(hba, cmd); break; case POST_CHANGE: - exynos_ufs_post_hibern8(hba, enter); + exynos_ufs_post_hibern8(hba, cmd); break; } } @@ -1891,6 +1917,12 @@ static int gs101_ufs_post_link(struct exynos_ufs *ufs) { struct ufs_hba *hba = ufs->hba; + /* + * Enable Write Line Unique. This field has to be 0x3 + * to support Write Line Unique transaction on gs101. 
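exynos_ufs_get_hs_gear() above derives the maximum HS gear from the major field of the host's UFS version using GENMASK()/FIELD_GET(); the same bitfield idiom in isolation (names and gear values are illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_VER_MAJOR_MASK     GENMASK(15, 8)

/*
 * Extract the major-version field the way exynos_ufs_get_hs_gear() does
 * for hba->ufs_version; the gear numbers here are only for illustration.
 */
static u32 demo_pick_gear(u32 ufs_version)
{
        u8 major = FIELD_GET(DEMO_VER_MAJOR_MASK, ufs_version);

        return major >= 3 ? 4 : 3;      /* UFS 3.x and later: HS-G4, else HS-G3 */
}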
+ */ + hci_writel(ufs, WLU_EN | WLU_BURST_LEN(3), HCI_AXIDMA_RWDATA_BURST_LEN); + exynos_ufs_enable_dbg_mode(hba); ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0x3e8); exynos_ufs_disable_dbg_mode(hba); @@ -2036,7 +2068,6 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = { EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX | EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB | EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER, - .drv_init = exynos7_ufs_drv_init, .pre_link = exynos7_ufs_pre_link, .post_link = exynos7_ufs_post_link, .pre_pwr_change = exynos7_ufs_pre_pwr_change, @@ -2045,26 +2076,6 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = { static struct exynos_ufs_uic_attr gs101_uic_attr = { .tx_trailingclks = 0xff, - .tx_dif_p_nsec = 3000000, /* unit: ns */ - .tx_dif_n_nsec = 1000000, /* unit: ns */ - .tx_high_z_cnt_nsec = 20000, /* unit: ns */ - .tx_base_unit_nsec = 100000, /* unit: ns */ - .tx_gran_unit_nsec = 4000, /* unit: ns */ - .tx_sleep_cnt = 1000, /* unit: ns */ - .tx_min_activatetime = 0xa, - .rx_filler_enable = 0x2, - .rx_dif_p_nsec = 1000000, /* unit: ns */ - .rx_hibern8_wait_nsec = 4000000, /* unit: ns */ - .rx_base_unit_nsec = 100000, /* unit: ns */ - .rx_gran_unit_nsec = 4000, /* unit: ns */ - .rx_sleep_cnt = 1280, /* unit: ns */ - .rx_stall_cnt = 320, /* unit: ns */ - .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf), - .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf), - .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf), - .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf), - .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf), - .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf), .pa_dbg_opt_suite1_val = 0x90913C1C, .pa_dbg_opt_suite1_off = PA_GS101_DBG_OPTION_SUITE1, .pa_dbg_opt_suite2_val = 0xE01C115F, @@ -2122,11 +2133,10 @@ static const struct exynos_ufs_drv_data gs101_ufs_drvs = { UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL | UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING, - .opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL | - EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | + .opts = EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | EXYNOS_UFS_OPT_UFSPR_SECURE | EXYNOS_UFS_OPT_TIMER_TICK_SELECT, - .drv_init = exynosauto_ufs_drv_init, + .drv_init = gs101_ufs_drv_init, .pre_link = gs101_ufs_pre_link, .post_link = gs101_ufs_post_link, .pre_pwr_change = gs101_ufs_pre_pwr_change, diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h index 1646c4a9bb08..9670dc138d1e 100644 --- a/drivers/ufs/host/ufs-exynos.h +++ b/drivers/ufs/host/ufs-exynos.h @@ -182,7 +182,7 @@ struct exynos_ufs_drv_data { unsigned int quirks; unsigned int opts; /* SoC's specific operations */ - int (*drv_init)(struct device *dev, struct exynos_ufs *ufs); + int (*drv_init)(struct exynos_ufs *ufs); int (*pre_link)(struct exynos_ufs *ufs); int (*post_link)(struct exynos_ufs *ufs); int (*pre_pwr_change)(struct exynos_ufs *ufs, diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index 9a5919434c4e..06ab1e5e8b6f 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -1780,6 +1780,15 @@ static int ufs_mtk_config_esi(struct ufs_hba *hba) return ufs_mtk_config_mcq(hba, true); } +static void ufs_mtk_config_scsi_dev(struct scsi_device *sdev) +{ + struct ufs_hba *hba = shost_priv(sdev->host); + + dev_dbg(hba->dev, "lu %llu scsi device configured", sdev->lun); + if (sdev->lun == 2) + blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, sdev->request_queue); +} + /* * struct ufs_hba_mtk_vops - UFS MTK specific variant operations * @@ -1809,6 +1818,7 @@ static const struct 
ufs_hba_variant_ops ufs_hba_mtk_vops = { .op_runtime_config = ufs_mtk_op_runtime_config, .mcq_config_resource = ufs_mtk_mcq_config_resource, .config_esi = ufs_mtk_config_esi, + .config_scsi_dev = ufs_mtk_config_scsi_dev, }; /** diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index ecdfff2456e3..3b592492e152 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -828,12 +828,28 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); - if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC) - hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE; - return err; } +/* UFS device-specific quirks */ +static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = { + { .wmanufacturerid = UFS_VENDOR_SKHYNIX, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, + { .wmanufacturerid = UFS_VENDOR_TOSHIBA, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM }, + { .wmanufacturerid = UFS_VENDOR_WDC, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE }, + {} +}; + +static void ufs_qcom_fixup_dev_quirks(struct ufs_hba *hba) +{ + ufshcd_fixup_dev_quirks(hba, ufs_qcom_dev_fixups); +} + static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) { return ufshci_version(2, 0); @@ -858,7 +874,8 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) if (host->hw_ver.major > 0x3) hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; - if (of_device_is_compatible(hba->dev->of_node, "qcom,sm8550-ufshc")) + if (of_device_is_compatible(hba->dev->of_node, "qcom,sm8550-ufshc") || + of_device_is_compatible(hba->dev->of_node, "qcom,sm8650-ufshc")) hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP; } @@ -1801,6 +1818,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .link_startup_notify = ufs_qcom_link_startup_notify, .pwr_change_notify = ufs_qcom_pwr_change_notify, .apply_dev_quirks = ufs_qcom_apply_dev_quirks, + .fixup_dev_quirks = ufs_qcom_fixup_dev_quirks, .suspend = ufs_qcom_suspend, .resume = ufs_qcom_resume, .dbg_register_dump = ufs_qcom_dump_dbg_regs, diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c index 8711e5cbc968..3ff97112e1f6 100644 --- a/drivers/ufs/host/ufs-renesas.c +++ b/drivers/ufs/host/ufs-renesas.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -364,14 +365,20 @@ static int ufs_renesas_init(struct ufs_hba *hba) return -ENOMEM; ufshcd_set_variant(hba, priv); - hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO; + hba->quirks |= UFSHCD_QUIRK_HIBERN_FASTAUTO; return 0; } +static int ufs_renesas_set_dma_mask(struct ufs_hba *hba) +{ + return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); +} + static const struct ufs_hba_variant_ops ufs_renesas_vops = { .name = "renesas", .init = ufs_renesas_init, + .set_dma_mask = ufs_renesas_set_dma_mask, .setup_clocks = ufs_renesas_setup_clocks, .hce_enable_notify = ufs_renesas_hce_enable_notify, .dbg_register_dump = ufs_renesas_dbg_register_dump, diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c index 54e0cc0653a2..ea39c5d5b8cf 100644 --- a/drivers/ufs/host/ufshcd-pci.c +++ b/drivers/ufs/host/ufshcd-pci.c @@ -588,14 +588,12 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); - err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD); - if (err < 0) { + mmio_base = pcim_iomap_region(pdev, 0, 
UFSHCD); + if (IS_ERR(mmio_base)) { dev_err(&pdev->dev, "request and iomap failed\n"); - return err; + return PTR_ERR(mmio_base); } - mmio_base = pcim_iomap_table(pdev)[0]; - err = ufshcd_alloc_host(&pdev->dev, &hba); if (err) { dev_err(&pdev->dev, "Allocation failed\n"); diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 3f68ae3e4330..d7aca9e61684 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -299,6 +299,8 @@ struct ufs_pwr_mode_info { * @max_num_rtt: maximum RTT supported by the host * @init: called when the driver is initialized * @exit: called to cleanup everything done in init + * @set_dma_mask: For setting another DMA mask than indicated by the 64AS + * capability bit. * @get_ufs_hci_version: called to get UFS HCI version * @clk_scale_notify: notifies that clks are scaled up/down * @setup_clocks: called before touching any of the controller registers @@ -341,6 +343,7 @@ struct ufs_hba_variant_ops { int (*init)(struct ufs_hba *); void (*exit)(struct ufs_hba *); u32 (*get_ufs_hci_version)(struct ufs_hba *); + int (*set_dma_mask)(struct ufs_hba *); int (*clk_scale_notify)(struct ufs_hba *, bool, enum ufs_notify_change_status); int (*setup_clocks)(struct ufs_hba *, bool, @@ -383,6 +386,7 @@ struct ufs_hba_variant_ops { int (*get_outstanding_cqs)(struct ufs_hba *hba, unsigned long *ocqs); int (*config_esi)(struct ufs_hba *hba); + void (*config_scsi_dev)(struct scsi_device *sdev); }; /* clock gating state */ @@ -623,12 +627,6 @@ enum ufshcd_quirks { */ UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16, - /* - * This quirk needs to be enabled if the host controller has - * 64-bit addressing supported capability but it doesn't work. - */ - UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS = 1 << 17, - /* * This quirk needs to be enabled if the host controller has * auto-hibernate capability but it's FASTAUTO only. @@ -877,9 +875,10 @@ enum ufshcd_mcq_opr { * @tmf_tag_set: TMF tag set. * @tmf_queue: Used to allocate TMF tags. * @tmf_rqs: array with pointers to TMF requests while these are in progress. - * @active_uic_cmd: handle of active UIC command - * @uic_cmd_mutex: mutex for UIC command - * @uic_async_done: completion used during UIC processing + * @active_uic_cmd: pointer to active UIC command. + * @uic_cmd_mutex: mutex used for serializing UIC command processing. + * @uic_async_done: completion used to wait for power mode or hibernation state + * changes. 
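The ufs-qcom hunk above moves the WDC workaround into a ufs_dev_quirk table applied through the ->fixup_dev_quirks() hook; a hypothetical variant driver would wire it up the same way (the myufs_* names are made up):

#include <ufs/ufshcd.h>
#include <ufs/ufs_quirks.h>

/* Hypothetical fixup table; the real qcom table above lists its own vendors. */
static const struct ufs_dev_quirk myufs_dev_fixups[] = {
        { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
          .model = UFS_ANY_MODEL,
          .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
        {}
};

/* Wired into ufs_hba_variant_ops as .fixup_dev_quirks. */
static void myufs_fixup_dev_quirks(struct ufs_hba *hba)
{
        ufshcd_fixup_dev_quirks(hba, myufs_dev_fixups);
}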
* @ufshcd_state: UFSHCD state * @eh_flags: Error handling flags * @intr_mask: Interrupt Mask Bits @@ -927,7 +926,6 @@ enum ufshcd_mcq_opr { * @wb_mutex: used to serialize devfreq and sysfs write booster toggling * @clk_scaling_lock: used to serialize device commands and clock scaling * @desc_size: descriptor sizes reported by device - * @scsi_block_reqs_cnt: reference counting for scsi block requests * @bsg_dev: struct device associated with the BSG queue * @bsg_queue: BSG queue associated with the UFS controller * @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power @@ -1088,7 +1086,6 @@ struct ufs_hba { struct mutex wb_mutex; struct rw_semaphore clk_scaling_lock; - atomic_t scsi_block_reqs_cnt; struct device bsg_dev; struct request_queue *bsg_queue; @@ -1320,8 +1317,8 @@ void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i); unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba, struct ufs_hw_queue *hwq); void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba); -void ufshcd_mcq_enable_esi(struct ufs_hba *hba); void ufshcd_mcq_enable(struct ufs_hba *hba); +void ufshcd_mcq_enable_esi(struct ufs_hba *hba); void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg); int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
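Finally, for the two PCI glue drivers converted above, pcim_iomap_region() both requests and maps a single BAR and returns the mapping (or an ERR_PTR) directly, so the separate pcim_iomap_table() lookup goes away; an illustrative use with made-up names:

#include <linux/pci.h>
#include <linux/err.h>

/*
 * Managed single-BAR request + map, as used by the PCI glue drivers above;
 * the caller checks the returned pointer with IS_ERR()/PTR_ERR().
 */
static void __iomem *demo_map_bar0(struct pci_dev *pdev)
{
        void __iomem *base = pcim_iomap_region(pdev, 0, "demo-driver");

        if (IS_ERR(base))
                dev_err(&pdev->dev, "request and iomap failed\n");
        return base;
}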