SHARE
TWEET

qpnp-fg.c [bun - v3]

Snow_ro Dec 11th, 2019 73 Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. /* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  2.  *
  3.  * This program is free software; you can redistribute it and/or modify
  4.  * it under the terms of the GNU General Public License version 2 and
  5.  * only version 2 as published by the Free Software Foundation.
  6.  *
  7.  * This program is distributed in the hope that it will be useful,
  8.  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9.  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  10.  * GNU General Public License for more details.
  11.  */
  12.  
  13. #define pr_fmt(fmt) "FG: %s: " fmt, __func__
  14.  
  15. #include <linux/atomic.h>
  16. #include <linux/delay.h>
  17. #include <linux/kernel.h>
  18. #include <linux/of.h>
  19. #include <linux/rtc.h>
  20. #include <linux/err.h>
  21. #include <linux/debugfs.h>
  22. #include <linux/slab.h>
  23. #include <linux/uaccess.h>
  24. #include <linux/init.h>
  25. #include <linux/spmi.h>
  26. #include <linux/of_irq.h>
  27. #include <linux/interrupt.h>
  28. #include <linux/bitops.h>
  29. #include <linux/types.h>
  30. #include <linux/module.h>
  31. #include <linux/ktime.h>
  32. #include <linux/power_supply.h>
  33. #include <linux/of_batterydata.h>
  34. #include <linux/spinlock.h>
  35. #include <linux/string_helpers.h>
  36. #include <linux/alarmtimer.h>
  37. #include <linux/qpnp/qpnp-revid.h>
  38. #include <linux/reboot.h>
  39.  
  40. #ifdef CONFIG_PRODUCT_LE_ZL1
  41. static int empty_cn = 0;
  42. #endif
  43.  
  44. /* Register offsets */
  45.  
  46. /* Interrupt offsets */
  47. #define INT_RT_STS(base)            (base + 0x10)
  48. #define INT_EN_CLR(base)            (base + 0x16)
  49.  
  50. /* SPMI Register offsets */
  51. #define SOC_MONOTONIC_SOC   0x09
  52. #define OTP_CFG1        0xE2
  53. #define SOC_BOOT_MOD        0x50
  54. #define SOC_RESTART     0x51
  55.  
  56. #define REG_OFFSET_PERP_SUBTYPE 0x05
  57.  
  58. /* RAM register offsets */
  59. #define RAM_OFFSET      0x400
  60.  
  61. /* Bit/Mask definitions */
  62. #define FULL_PERCENT        0xFF
  63. #define MAX_TRIES_SOC       5
  64. #define MA_MV_BIT_RES       39
  65. #define MSB_SIGN        BIT(7)
  66. #define IBAT_VBAT_MASK      0x7F
  67. #define NO_OTP_PROF_RELOAD  BIT(6)
  68. #define REDO_FIRST_ESTIMATE BIT(3)
  69. #define RESTART_GO      BIT(0)
  70. #define THERM_DELAY_MASK    0xE0
  71.  
  72. /* SUBTYPE definitions */
  73. #define FG_SOC          0x9
  74. #define FG_BATT         0xA
  75. #define FG_ADC          0xB
  76. #define FG_MEMIF        0xC
  77.  
  78. #define QPNP_FG_DEV_NAME "qcom,qpnp-fg"
  79. #define MEM_IF_TIMEOUT_MS   5000
  80. #define FG_CYCLE_MS     1500
  81. #define BUCKET_COUNT        8
  82. #define BUCKET_SOC_PCT      (256 / BUCKET_COUNT)
  83.  
  84. #define BCL_MA_TO_ADC(_current, _adc_val) {     \
  85.     _adc_val = (u8)((_current) * 100 / 976);    \
  86. }
  87.  
  88. #define ESR_MAX             300000
  89. #define ESR_MIN             5000
  90.  
  91. /* Debug Flag Definitions */
  92. enum {
  93.     FG_SPMI_DEBUG_WRITES        = BIT(0), /* Show SPMI writes */
  94.     FG_SPMI_DEBUG_READS     = BIT(1), /* Show SPMI reads */
  95.     FG_IRQS             = BIT(2), /* Show interrupts */
  96.     FG_MEM_DEBUG_WRITES     = BIT(3), /* Show SRAM writes */
  97.     FG_MEM_DEBUG_READS      = BIT(4), /* Show SRAM reads */
  98.     FG_POWER_SUPPLY         = BIT(5), /* Show POWER_SUPPLY */
  99.     FG_STATUS           = BIT(6), /* Show FG status changes */
  100.     FG_AGING            = BIT(7), /* Show FG aging algorithm */
  101. };
  102.  
  103. /* PMIC REVISIONS */
  104. #define REVID_RESERVED          0
  105. #define REVID_VARIANT           1
  106. #define REVID_ANA_MAJOR         2
  107. #define REVID_DIG_MAJOR         3
  108.  
  109. enum dig_major {
  110.     DIG_REV_8994_1 = 0x1,
  111.     DIG_REV_8994_2 = 0x2,
  112.     DIG_REV_8950_3 = 0x3,
  113. };
  114.  
  115. enum pmic_subtype {
  116.     PMI8994     = 10,
  117.     PMI8950     = 17,
  118.     PMI8996     = 19,
  119.     PMI8937     = 55,
  120.     PMI8940     = 64,
  121. };
  122.  
  123. enum wa_flags {
  124.     IADC_GAIN_COMP_WA = BIT(0),
  125.     USE_CC_SOC_REG = BIT(1),
  126.     PULSE_REQUEST_WA = BIT(2),
  127.     BCL_HI_POWER_FOR_CHGLED_WA = BIT(3)
  128. };
  129.  
  130. enum current_sense_type {
  131.     INTERNAL_CURRENT_SENSE,
  132.     EXTERNAL_CURRENT_SENSE,
  133. };
  134.  
  135. struct fg_mem_setting {
  136.     u16 address;
  137.     u8  offset;
  138.     int value;
  139. };
  140.  
  141. struct fg_mem_data {
  142.     u16 address;
  143.     u8  offset;
  144.     unsigned int len;
  145.     int value;
  146. };
  147.  
  148. struct fg_learning_data {
  149.     int64_t         cc_uah;
  150.     int64_t         learned_cc_uah;
  151.     int         init_cc_pc_val;
  152.     bool            active;
  153.     bool            feedback_on;
  154.     struct mutex        learning_lock;
  155.     ktime_t         time_stamp;
  156.     /* configuration properties */
  157.     int         max_start_soc;
  158.     int         max_increment;
  159.     int         max_decrement;
  160.     int         min_temp;
  161.     int         max_temp;
  162.     int         vbat_est_thr_uv;
  163.     int         max_cap_limit;
  164.     int         min_cap_limit;
  165. };
  166.  
  167. struct fg_rslow_data {
  168.     u8          rslow_cfg;
  169.     u8          rslow_thr;
  170.     u8          rs_to_rslow[2];
  171.     u8          rslow_comp[4];
  172.     uint32_t        chg_rs_to_rslow;
  173.     uint32_t        chg_rslow_comp_c1;
  174.     uint32_t        chg_rslow_comp_c2;
  175.     uint32_t        chg_rslow_comp_thr;
  176.     bool            active;
  177.     struct mutex        lock;
  178. };
  179.  
  180. struct fg_cyc_ctr_data {
  181.     bool            en;
  182.     bool            started[BUCKET_COUNT];
  183.     u16         count[BUCKET_COUNT];
  184.     u8          last_soc[BUCKET_COUNT];
  185.     int         id;
  186.     struct mutex        lock;
  187. };
  188.  
  189. struct fg_iadc_comp_data {
  190.     u8          dfl_gain_reg[2];
  191.     bool            gain_active;
  192.     int64_t         dfl_gain;
  193. };
  194.  
  195. struct fg_cc_soc_data {
  196.     int init_sys_soc;
  197.     int init_cc_soc;
  198.     int full_capacity;
  199.     int delta_soc;
  200. };
  201.  
  202. /* FG_MEMIF setting index */
  203. enum fg_mem_setting_index {
  204.     FG_MEM_SOFT_COLD = 0,
  205.     FG_MEM_SOFT_HOT,
  206.     FG_MEM_HARD_COLD,
  207.     FG_MEM_HARD_HOT,
  208.     FG_MEM_RESUME_SOC,
  209.     FG_MEM_BCL_LM_THRESHOLD,
  210.     FG_MEM_BCL_MH_THRESHOLD,
  211.     FG_MEM_TERM_CURRENT,
  212.     FG_MEM_CHG_TERM_CURRENT,
  213.     FG_MEM_IRQ_VOLT_EMPTY,
  214.     FG_MEM_CUTOFF_VOLTAGE,
  215.     FG_MEM_VBAT_EST_DIFF,
  216.     FG_MEM_DELTA_SOC,
  217.     FG_MEM_BATT_LOW,
  218.     FG_MEM_THERM_DELAY,
  219.     FG_MEM_SETTING_MAX,
  220. };
  221.  
  222. /* FG_MEMIF data index */
  223. enum fg_mem_data_index {
  224.     FG_DATA_BATT_TEMP = 0,
  225.     FG_DATA_OCV,
  226.     FG_DATA_VOLTAGE,
  227.     FG_DATA_CURRENT,
  228.     FG_DATA_BATT_ESR,
  229.     FG_DATA_BATT_ESR_COUNT,
  230.     FG_DATA_BATT_SOC,
  231.     FG_DATA_CC_CHARGE,
  232.     FG_DATA_VINT_ERR,
  233.     FG_DATA_CPRED_VOLTAGE,
  234.     /* values below this only gets read once per profile reload */
  235.     FG_DATA_BATT_ID,
  236.     FG_DATA_BATT_ID_INFO,
  237.     FG_DATA_MAX,
  238. };
  239.  
  240. #define SETTING(_idx, _address, _offset, _value)    \
  241.     [FG_MEM_##_idx] = {             \
  242.         .address = _address,            \
  243.         .offset = _offset,          \
  244.         .value = _value,            \
  245.     }                       \
  246.  
  247. static struct fg_mem_setting settings[FG_MEM_SETTING_MAX] = {
  248.     /*       ID                    Address, Offset, Value*/
  249.     SETTING(SOFT_COLD,       0x454,   0,      100),
  250.     SETTING(SOFT_HOT,        0x454,   1,      400),
  251.     SETTING(HARD_COLD,       0x454,   2,      50),
  252.     SETTING(HARD_HOT,        0x454,   3,      450),
  253.     SETTING(RESUME_SOC,      0x45C,   1,      0),
  254.     SETTING(BCL_LM_THRESHOLD, 0x47C,   2,      50),
  255.     SETTING(BCL_MH_THRESHOLD, 0x47C,   3,      752),
  256.     SETTING(TERM_CURRENT,    0x40C,   2,      250),
  257.     SETTING(CHG_TERM_CURRENT, 0x4F8,   2,      250),
  258.     SETTING(IRQ_VOLT_EMPTY,  0x458,   3,      3100),
  259.     SETTING(CUTOFF_VOLTAGE,  0x40C,   0,      3200),
  260.     SETTING(VBAT_EST_DIFF,   0x000,   0,      30),
  261.     SETTING(DELTA_SOC,   0x450,   3,      1),
  262.     SETTING(BATT_LOW,    0x458,   0,      4200),
  263.     SETTING(THERM_DELAY,     0x4AC,   3,      0),
  264. };
  265.  
  266. #define DATA(_idx, _address, _offset, _length,  _value) \
  267.     [FG_DATA_##_idx] = {                \
  268.         .address = _address,            \
  269.         .offset = _offset,          \
  270.         .len = _length,         \
  271.         .value = _value,            \
  272.     }                       \
  273.  
  274. static struct fg_mem_data fg_data[FG_DATA_MAX] = {
  275.     /*       ID           Address, Offset, Length, Value*/
  276.     DATA(BATT_TEMP,       0x550,   2,      2,     -EINVAL),
  277.     DATA(OCV,             0x588,   3,      2,     -EINVAL),
  278.     DATA(VOLTAGE,         0x5CC,   1,      2,     -EINVAL),
  279.     DATA(CURRENT,         0x5CC,   3,      2,     -EINVAL),
  280.     DATA(BATT_ESR,        0x554,   2,      2,     -EINVAL),
  281.     DATA(BATT_ESR_COUNT,  0x558,   2,      2,     -EINVAL),
  282.     DATA(BATT_SOC,        0x56C,   1,      3,     -EINVAL),
  283.     DATA(CC_CHARGE,       0x570,   0,      4,     -EINVAL),
  284.     DATA(VINT_ERR,        0x560,   0,      4,     -EINVAL),
  285.     DATA(CPRED_VOLTAGE,   0x540,   0,      2,     -EINVAL),
  286.     DATA(BATT_ID,         0x594,   1,      1,     -EINVAL),
  287.     DATA(BATT_ID_INFO,    0x594,   3,      1,     -EINVAL),
  288. };
  289.  
  290. enum fg_mem_backup_index {
  291.     FG_BACKUP_SOC = 0,
  292.     FG_BACKUP_CYCLE_COUNT,
  293.     FG_BACKUP_CC_SOC_COEFF,
  294.     FG_BACKUP_IGAIN,
  295.     FG_BACKUP_VCOR,
  296.     FG_BACKUP_TEMP_COUNTER,
  297.     FG_BACKUP_AGING_STORAGE,
  298.     FG_BACKUP_MAH_TO_SOC,
  299.     FG_BACKUP_MAX,
  300. };
  301.  
  302. #define BACKUP(_idx, _address, _offset, _length,  _value)   \
  303.     [FG_BACKUP_##_idx] = {              \
  304.         .address = _address,            \
  305.         .offset = _offset,          \
  306.         .len = _length,         \
  307.         .value = _value,            \
  308.     }                       \
  309.  
  310. static struct fg_mem_data fg_backup_regs[FG_BACKUP_MAX] = {
  311.     /*       ID           Address, Offset, Length, Value*/
  312.     BACKUP(SOC,     0x564,   0,      24,     -EINVAL),
  313.     BACKUP(CYCLE_COUNT, 0x5E8,   0,      16,     -EINVAL),
  314.     BACKUP(CC_SOC_COEFF,    0x5BC,   0,      8,     -EINVAL),
  315.     BACKUP(IGAIN,       0x424,   0,      4,     -EINVAL),
  316.     BACKUP(VCOR,        0x484,   0,      4,     -EINVAL),
  317.     BACKUP(TEMP_COUNTER,    0x580,   0,      4,     -EINVAL),
  318.     BACKUP(AGING_STORAGE,   0x5E4,   0,      4,     -EINVAL),
  319.     BACKUP(MAH_TO_SOC,  0x4A0,   0,      4,     -EINVAL),
  320. };
  321.  
  322. static int fg_debug_mask;
  323. module_param_named(
  324.     debug_mask, fg_debug_mask, int, S_IRUSR | S_IWUSR
  325. );
  326.  
  327. static int fg_reset_on_lockup;
  328.  
  329. static int fg_sense_type = -EINVAL;
  330. static int fg_restart;
  331.  
  332. static int fg_est_dump;
  333. module_param_named(
  334.     first_est_dump, fg_est_dump, int, S_IRUSR | S_IWUSR
  335. );
  336.  
  337. static char *fg_batt_type;
  338. module_param_named(
  339.     battery_type, fg_batt_type, charp, S_IRUSR | S_IWUSR
  340. );
  341.  
  342. static int fg_sram_update_period_ms = 10000;
  343. module_param_named(
  344.     sram_update_period_ms, fg_sram_update_period_ms, int, S_IRUSR | S_IWUSR
  345. );
  346.  
  347. static bool fg_batt_valid_ocv;
  348. module_param_named(batt_valid_ocv, fg_batt_valid_ocv, bool, S_IRUSR | S_IWUSR);
  349.  
  350. static int fg_batt_range_pct;
  351. module_param_named(batt_range_pct, fg_batt_range_pct, int, S_IRUSR | S_IWUSR);
  352.  
  353. struct fg_irq {
  354.     int         irq;
  355.     bool            disabled;
  356.     bool            wakeup;
  357. };
  358.  
  359. enum fg_soc_irq {
  360.     HIGH_SOC,
  361.     LOW_SOC,
  362.     FULL_SOC,
  363.     EMPTY_SOC,
  364.     DELTA_SOC,
  365.     FIRST_EST_DONE,
  366.     SW_FALLBK_OCV,
  367.     SW_FALLBK_NEW_BATT,
  368.     FG_SOC_IRQ_COUNT,
  369. };
  370.  
  371. enum fg_batt_irq {
  372.     JEITA_SOFT_COLD,
  373.     JEITA_SOFT_HOT,
  374.     VBATT_LOW,
  375.     BATT_IDENTIFIED,
  376.     BATT_ID_REQ,
  377.     BATTERY_UNKNOWN,
  378.     BATT_MISSING,
  379.     BATT_MATCH,
  380.     FG_BATT_IRQ_COUNT,
  381. };
  382.  
  383. enum fg_mem_if_irq {
  384.     FG_MEM_AVAIL,
  385.     TA_RCVRY_SUG,
  386.     FG_MEM_IF_IRQ_COUNT,
  387. };
  388.  
  389. enum fg_batt_aging_mode {
  390.     FG_AGING_NONE,
  391.     FG_AGING_ESR,
  392.     FG_AGING_CC,
  393. };
  394.  
  395. enum register_type {
  396.     MEM_INTF_CFG,
  397.     MEM_INTF_CTL,
  398.     MEM_INTF_ADDR_LSB,
  399.     MEM_INTF_RD_DATA0,
  400.     MEM_INTF_WR_DATA0,
  401.     MAX_ADDRESS,
  402. };
  403.  
  404. enum batt_info_params {
  405.     BATT_INFO_NOTIFY = 0,
  406.     BATT_INFO_SOC,
  407.     BATT_INFO_RES_ID,
  408.     BATT_INFO_VOLTAGE,
  409.     BATT_INFO_TEMP,
  410.     BATT_INFO_FCC,
  411.     BATT_INFO_MAX,
  412. };
  413.  
  414. struct register_offset {
  415.     u16 address[MAX_ADDRESS];
  416. };
  417.  
  418. static struct register_offset offset[] = {
  419.     [0] = {
  420.              /* CFG   CTL   LSB   RD0   WD0 */
  421.         .address = {0x40, 0x41, 0x42, 0x4C, 0x48},
  422.     },
  423.     [1] = {
  424.              /* CFG   CTL   LSB   RD0   WD0 */
  425.         .address = {0x50, 0x51, 0x61, 0x67, 0x63},
  426.     },
  427. };
  428.  
  429. #define MEM_INTF_CFG(chip)  \
  430.         ((chip)->mem_base + (chip)->offset[MEM_INTF_CFG])
  431. #define MEM_INTF_CTL(chip)  \
  432.         ((chip)->mem_base + (chip)->offset[MEM_INTF_CTL])
  433. #define MEM_INTF_ADDR_LSB(chip) \
  434.         ((chip)->mem_base + (chip)->offset[MEM_INTF_ADDR_LSB])
  435. #define MEM_INTF_RD_DATA0(chip) \
  436.         ((chip)->mem_base + (chip)->offset[MEM_INTF_RD_DATA0])
  437. #define MEM_INTF_WR_DATA0(chip) \
  438.         ((chip)->mem_base + (chip)->offset[MEM_INTF_WR_DATA0])
  439.  
  440. struct fg_wakeup_source {
  441.     struct wakeup_source    source;
  442.     unsigned long       enabled;
  443. };
  444.  
  445. static void fg_stay_awake(struct fg_wakeup_source *source)
  446. {
  447.     if (!__test_and_set_bit(0, &source->enabled)) {
  448.         __pm_stay_awake(&source->source);
  449.         pr_debug("enabled source %s\n", source->source.name);
  450.     }
  451. }
  452.  
  453. static void fg_relax(struct fg_wakeup_source *source)
  454. {
  455.     if (__test_and_clear_bit(0, &source->enabled)) {
  456.         __pm_relax(&source->source);
  457.         pr_debug("disabled source %s\n", source->source.name);
  458.     }
  459. }
  460.  
  461. enum slope_limit_status {
  462.     LOW_TEMP_CHARGE,
  463.     HIGH_TEMP_CHARGE,
  464.     LOW_TEMP_DISCHARGE,
  465.     HIGH_TEMP_DISCHARGE,
  466.     SLOPE_LIMIT_MAX,
  467. };
  468.  
  469. #define VOLT_GAIN_MAX       3
  470. struct dischg_gain_soc {
  471.     bool            enable;
  472.     u32         soc[VOLT_GAIN_MAX];
  473.     u32         medc_gain[VOLT_GAIN_MAX];
  474.     u32         highc_gain[VOLT_GAIN_MAX];
  475. };
  476.  
  477. #define THERMAL_COEFF_N_BYTES       6
  478. struct fg_chip {
  479.     struct device       *dev;
  480.     struct spmi_device  *spmi;
  481.     u8          pmic_subtype;
  482.     u8          pmic_revision[4];
  483.     u8          revision[4];
  484.     u16         soc_base;
  485.     u16         batt_base;
  486.     u16         mem_base;
  487.     u16         vbat_adc_addr;
  488.     u16         ibat_adc_addr;
  489.     u16         tp_rev_addr;
  490.     u32         wa_flag;
  491.     atomic_t        memif_user_cnt;
  492.     struct fg_irq       soc_irq[FG_SOC_IRQ_COUNT];
  493.     struct fg_irq       batt_irq[FG_BATT_IRQ_COUNT];
  494.     struct fg_irq       mem_irq[FG_MEM_IF_IRQ_COUNT];
  495.     struct completion   sram_access_granted;
  496.     struct completion   sram_access_revoked;
  497.     struct completion   fg_sram_updating_done;
  498.     struct completion   batt_id_avail;
  499.     struct completion   first_soc_done;
  500.     struct power_supply bms_psy;
  501.     spinlock_t      sec_access_lock;
  502.     struct mutex        rw_lock;
  503.     struct mutex        sysfs_restart_lock;
  504.     struct delayed_work batt_profile_init;
  505.     struct work_struct  dump_sram;
  506.     struct work_struct  status_change_work;
  507.     struct work_struct  cycle_count_work;
  508.     struct work_struct  battery_age_work;
  509.     struct work_struct  update_esr_work;
  510.     struct work_struct  set_resume_soc_work;
  511.     struct work_struct  rslow_comp_work;
  512.     struct work_struct  sysfs_restart_work;
  513.     struct work_struct  init_work;
  514.     struct work_struct  charge_full_work;
  515.     struct work_struct  gain_comp_work;
  516.     struct work_struct  bcl_hi_power_work;
  517.     struct power_supply *batt_psy;
  518.     struct power_supply *usb_psy;
  519.     struct power_supply *dc_psy;
  520.     struct fg_wakeup_source memif_wakeup_source;
  521.     struct fg_wakeup_source profile_wakeup_source;
  522.     struct fg_wakeup_source empty_check_wakeup_source;
  523.     struct fg_wakeup_source resume_soc_wakeup_source;
  524.     struct fg_wakeup_source gain_comp_wakeup_source;
  525.     struct fg_wakeup_source capacity_learning_wakeup_source;
  526.     bool            first_profile_loaded;
  527.     struct fg_wakeup_source update_temp_wakeup_source;
  528.     struct fg_wakeup_source update_sram_wakeup_source;
  529.     bool            fg_restarting;
  530.     bool            profile_loaded;
  531.     bool            soc_reporting_ready;
  532.     bool            use_otp_profile;
  533.     bool            battery_missing;
  534.     bool            power_supply_registered;
  535.     bool            sw_rbias_ctrl;
  536.     bool            use_thermal_coefficients;
  537.     bool            esr_strict_filter;
  538.     bool            soc_empty;
  539.     bool            charge_done;
  540.     bool            resume_soc_lowered;
  541.     bool            vbat_low_irq_enabled;
  542.     bool            full_soc_irq_enabled;
  543.     bool            charge_full;
  544.     bool            hold_soc_while_full;
  545.     bool            input_present;
  546.     bool            otg_present;
  547.     bool            safety_timer_expired;
  548.     bool            bad_batt_detection_en;
  549.     bool            bcl_lpm_disabled;
  550.     bool            charging_disabled;
  551.     bool            use_vbat_low_empty_soc;
  552.     bool            fg_shutdown;
  553.     bool            use_soft_jeita_irq;
  554.     bool            allow_false_negative_isense;
  555.     bool            fg_force_restart_enable;
  556.     struct delayed_work update_jeita_setting;
  557.     struct delayed_work update_sram_data;
  558.     struct delayed_work update_temp_work;
  559.     struct delayed_work check_empty_work;
  560.     char            *batt_profile;
  561.     u8          thermal_coefficients[THERMAL_COEFF_N_BYTES];
  562.     u32         cc_cv_threshold_mv;
  563.     unsigned int        batt_profile_len;
  564.     unsigned int        batt_max_voltage_uv;
  565.     const char      *batt_type;
  566.     const char      *batt_psy_name;
  567.     unsigned long       last_sram_update_time;
  568.     unsigned long       last_temp_update_time;
  569.     int64_t         ocv_coeffs[12];
  570.     int64_t         cutoff_voltage;
  571.     int         evaluation_current;
  572.     int         ocv_junction_p1p2;
  573.     int         ocv_junction_p2p3;
  574.     int         nom_cap_uah;
  575.     int         actual_cap_uah;
  576.     int         status;
  577.     int         prev_status;
  578.     int         health;
  579.     enum fg_batt_aging_mode batt_aging_mode;
  580.     struct alarm        hard_jeita_alarm;
  581.     /* capacity learning */
  582.     struct fg_learning_data learning_data;
  583.     struct alarm        fg_cap_learning_alarm;
  584.     struct work_struct  fg_cap_learning_work;
  585.     struct fg_cc_soc_data   sw_cc_soc_data;
  586.     /* rslow compensation */
  587.     struct fg_rslow_data    rslow_comp;
  588.     int         rconn_mohm;
  589.     /* cycle counter */
  590.     struct fg_cyc_ctr_data  cyc_ctr;
  591.     /* iadc compensation */
  592.     struct fg_iadc_comp_data iadc_comp_data;
  593.     /* interleaved memory access */
  594.     u16         *offset;
  595.     bool            ima_supported;
  596.     bool            init_done;
  597.     /* jeita hysteresis */
  598.     bool            jeita_hysteresis_support;
  599.     bool            batt_hot;
  600.     bool            batt_cold;
  601.     bool            batt_warm;
  602.     bool            batt_cool;
  603.     int         cold_hysteresis;
  604.     int         hot_hysteresis;
  605.     /* ESR pulse tuning */
  606.     struct fg_wakeup_source esr_extract_wakeup_source;
  607.     struct work_struct  esr_extract_config_work;
  608.     bool            esr_extract_disabled;
  609.     bool            imptr_pulse_slow_en;
  610.     bool            esr_pulse_tune_en;
  611.     bool            dummy_battery_fake_temp;
  612.     /* Slope limiter */
  613.     struct work_struct  slope_limiter_work;
  614.     struct fg_wakeup_source slope_limit_wakeup_source;
  615.     bool            soc_slope_limiter_en;
  616.     enum slope_limit_status slope_limit_sts;
  617.     u32         slope_limit_temp;
  618.     u32         slope_limit_coeffs[SLOPE_LIMIT_MAX];
  619.     /* Discharge soc gain */
  620.     struct work_struct  dischg_gain_work;
  621.     struct fg_wakeup_source dischg_gain_wakeup_source;
  622.     struct dischg_gain_soc  dischg_gain;
  623.     /* IMA error recovery */
  624.     struct completion   fg_reset_done;
  625.     struct work_struct  ima_error_recovery_work;
  626.     struct fg_wakeup_source fg_reset_wakeup_source;
  627.     struct mutex        ima_recovery_lock;
  628.     bool            ima_error_handling;
  629.     bool            block_sram_access;
  630.     bool            irqs_enabled;
  631.     bool            use_last_soc;
  632.     int         last_soc;
  633.     /* Validating temperature */
  634.     int         last_good_temp;
  635.     int         batt_temp_low_limit;
  636.     int         batt_temp_high_limit;
  637.     /* Validating CC_SOC */
  638.     struct work_struct  cc_soc_store_work;
  639.     struct fg_wakeup_source cc_soc_wakeup_source;
  640.     int         cc_soc_limit_pct;
  641.     bool            use_last_cc_soc;
  642.     int64_t         last_cc_soc;
  643.     /* Sanity check */
  644.     struct delayed_work check_sanity_work;
  645.     struct fg_wakeup_source sanity_wakeup_source;
  646.     u8          last_beat_count;
  647.     /* Batt_info restore */
  648.     int         batt_info[BATT_INFO_MAX];
  649.     int         batt_info_id;
  650.     bool            batt_info_restore;
  651.     bool            *batt_range_ocv;
  652.     int         *batt_range_pct;
  653.     struct notifier_block   fg_reboot;
  654.     bool            shutdown_in_process;
  655.     bool            low_batt_temp_comp;
  656. };
  657.  
  658. /* FG_MEMIF DEBUGFS structures */
  659. #define ADDR_LEN    4   /* 3 byte address + 1 space character */
  660. #define CHARS_PER_ITEM  3   /* Format is 'XX ' */
  661. #define ITEMS_PER_LINE  4   /* 4 data items per line */
  662. #define MAX_LINE_LENGTH  (ADDR_LEN + (ITEMS_PER_LINE * CHARS_PER_ITEM) + 1)
  663. #define MAX_REG_PER_TRANSACTION (8)
  664.  
  665. static const char *DFS_ROOT_NAME    = "fg_memif";
  666. static const mode_t DFS_MODE        =  S_IRUSR | S_IWUSR;
  667. static const char *default_batt_type    = "Unknown Battery";
  668. static const char *loading_batt_type    = "Loading Battery Data";
  669. static const char *missing_batt_type    = "Disconnected Battery";
  670.  
  671. /* Log buffer */
  672. struct fg_log_buffer {
  673.     size_t rpos;    /* Current 'read' position in buffer */
  674.     size_t wpos;    /* Current 'write' position in buffer */
  675.     size_t len; /* Length of the buffer */
  676.     char data[0];   /* Log buffer */
  677. };
  678.  
  679. /* transaction parameters */
  680. struct fg_trans {
  681.     u32 cnt;    /* Number of bytes to read */
  682.     u16 addr;   /* 12-bit address in SRAM */
  683.     u32 offset; /* Offset of last read data + byte offset */
  684.     struct fg_chip *chip;
  685.     struct fg_log_buffer *log; /* log buffer */
  686.     u8 *data;   /* fg data that is read */
  687.     struct mutex memif_dfs_lock; /* Prevent thread concurrency */
  688. };
  689.  
  690. struct fg_dbgfs {
  691.     u32 cnt;
  692.     u32 addr;
  693.     struct fg_chip *chip;
  694.     struct dentry *root;
  695.     struct mutex  lock;
  696.     struct debugfs_blob_wrapper help_msg;
  697. };
  698.  
  699. static struct fg_dbgfs dbgfs_data = {
  700.     .lock = __MUTEX_INITIALIZER(dbgfs_data.lock),
  701.     .help_msg = {
  702.     .data =
  703. "FG Debug-FS support\n"
  704. "\n"
  705. "Hierarchy schema:\n"
  706. "/sys/kernel/debug/fg_memif\n"
  707. "       /help            -- Static help text\n"
  708. "       /address  -- Starting register address for reads or writes\n"
  709. "       /count    -- Number of registers to read (only used for reads)\n"
  710. "       /data     -- Initiates the SRAM read (formatted output)\n"
  711. "\n",
  712.     },
  713. };
  714.  
  715. static const struct of_device_id fg_match_table[] = {
  716.     {   .compatible = QPNP_FG_DEV_NAME, },
  717.     {}
  718. };
  719.  
  720. static char *fg_supplicants[] = {
  721.     "battery",
  722.     "bcl",
  723.     "fg_adc"
  724. };
  725.  
  726. #define DEBUG_PRINT_BUFFER_SIZE 64
  727. static void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
  728. {
  729.     int pos = 0;
  730.     int i;
  731.  
  732.     for (i = 0; i < buf_len; i++) {
  733.         pos += scnprintf(str + pos, str_len - pos, "%02X", buf[i]);
  734.         if (i < buf_len - 1)
  735.             pos += scnprintf(str + pos, str_len - pos, " ");
  736.     }
  737. }
  738.  
  739. static int fg_write(struct fg_chip *chip, u8 *val, u16 addr, int len)
  740. {
  741.     int rc = 0;
  742.     struct spmi_device *spmi = chip->spmi;
  743.     char str[DEBUG_PRINT_BUFFER_SIZE];
  744.  
  745.     if ((addr & 0xff00) == 0) {
  746.         pr_err("addr cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
  747.             addr, spmi->sid, rc);
  748.         return -EINVAL;
  749.     }
  750.  
  751.     rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, addr, val, len);
  752.     if (rc) {
  753.         pr_err("write failed addr=0x%02x sid=0x%02x rc=%d\n",
  754.             addr, spmi->sid, rc);
  755.         return rc;
  756.     }
  757.  
  758.     if (!rc && (fg_debug_mask & FG_SPMI_DEBUG_WRITES)) {
  759.         str[0] = '\0';
  760.         fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, len);
  761.         pr_info("write(0x%04X), sid=%d, len=%d; %s\n",
  762.             addr, spmi->sid, len, str);
  763.     }
  764.  
  765.     return rc;
  766. }
  767.  
  768. static int fg_read(struct fg_chip *chip, u8 *val, u16 addr, int len)
  769. {
  770.     int rc = 0;
  771.     struct spmi_device *spmi = chip->spmi;
  772.     char str[DEBUG_PRINT_BUFFER_SIZE];
  773.  
  774.     if ((addr & 0xff00) == 0) {
  775.         pr_err("base cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
  776.             addr, spmi->sid, rc);
  777.         return -EINVAL;
  778.     }
  779.  
  780.     rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, addr, val, len);
  781.     if (rc) {
  782.         pr_err("SPMI read failed base=0x%02x sid=0x%02x rc=%d\n", addr,
  783.                 spmi->sid, rc);
  784.         return rc;
  785.     }
  786.  
  787.     if (!rc && (fg_debug_mask & FG_SPMI_DEBUG_READS)) {
  788.         str[0] = '\0';
  789.         fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, len);
  790.         pr_info("read(0x%04x), sid=%d, len=%d; %s\n",
  791.             addr, spmi->sid, len, str);
  792.     }
  793.  
  794.     return rc;
  795. }
  796.  
/*
 * Read-modify-write a register: the bits selected by @mask are replaced
 * with the corresponding bits of @val; all other bits are preserved.
 * Returns 0 on success or the fg_read()/fg_write() error code.
 *
 * Callers in this file (fg_masked_write, fg_sec_masked_write) hold
 * chip->sec_access_lock around this call, so the RMW is not torn by a
 * concurrent secure-access sequence.
 *
 * NOTE(review): @reg is a single byte but @len is forwarded unchanged
 * to both fg_read() and fg_write(); a caller passing len > 1 would
 * overflow this on-stack byte — confirm every call site passes len == 1.
 */
static int fg_masked_write_raw(struct fg_chip *chip, u16 addr,
		u8 mask, u8 val, int len)
{
	int rc;
	u8 reg;

	rc = fg_read(chip, &reg, addr, len);
	if (rc) {
		pr_err("spmi read failed: addr=%03X, rc=%d\n", addr, rc);
		return rc;
	}
	pr_debug("addr = 0x%x read 0x%x\n", addr, reg);

	/* clear the masked field, then merge in the new value */
	reg &= ~mask;
	reg |= val & mask;

	pr_debug("Writing 0x%x\n", reg);

	rc = fg_write(chip, &reg, addr, len);
	if (rc)
		pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);

	return rc;
}
  821.  
  822. static int fg_masked_write(struct fg_chip *chip, u16 addr,
  823.         u8 mask, u8 val, int len)
  824. {
  825.     int rc;
  826.     unsigned long flags;
  827.  
  828.     spin_lock_irqsave(&chip->sec_access_lock, flags);
  829.     rc = fg_masked_write_raw(chip, addr, mask, val, len);
  830.     spin_unlock_irqrestore(&chip->sec_access_lock, flags);
  831.  
  832.     return rc;
  833. }
  834.  
  835. #define SEC_ACCESS_OFFSET   0xD0
  836. #define SEC_ACCESS_VALUE    0xA5
  837. #define PERIPHERAL_MASK     0xFF
  838. static int fg_sec_masked_write(struct fg_chip *chip, u16 addr, u8 mask, u8 val,
  839.         int len)
  840. {
  841.     int rc;
  842.     unsigned long flags;
  843.     u8 temp;
  844.     u16 base = addr & (~PERIPHERAL_MASK);
  845.  
  846.     spin_lock_irqsave(&chip->sec_access_lock, flags);
  847.     temp = SEC_ACCESS_VALUE;
  848.     rc = fg_write(chip, &temp, base + SEC_ACCESS_OFFSET, 1);
  849.     if (rc) {
  850.         pr_err("Unable to unlock sec_access: %d\n", rc);
  851.         goto out;
  852.     }
  853.  
  854.     rc = fg_masked_write_raw(chip, addr, mask, val, len);
  855.     if (rc)
  856.         pr_err("Unable to write securely to address 0x%x: %d", addr,
  857.             rc);
  858. out:
  859.     spin_unlock_irqrestore(&chip->sec_access_lock, flags);
  860.     return rc;
  861. }
  862.  
  863. #define RIF_MEM_ACCESS_REQ  BIT(7)
  864. static int fg_check_rif_mem_access(struct fg_chip *chip, bool *status)
  865. {
  866.     int rc;
  867.     u8 mem_if_sts;
  868.  
  869.     rc = fg_read(chip, &mem_if_sts, MEM_INTF_CFG(chip), 1);
  870.     if (rc) {
  871.         pr_err("failed to read rif_mem status rc=%d\n", rc);
  872.         return rc;
  873.     }
  874.  
  875.     *status = mem_if_sts & RIF_MEM_ACCESS_REQ;
  876.     return 0;
  877. }
  878.  
  879. static bool fg_check_sram_access(struct fg_chip *chip)
  880. {
  881.     int rc;
  882.     u8 mem_if_sts;
  883.     bool rif_mem_sts = false;
  884.  
  885.     rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
  886.     if (rc) {
  887.         pr_err("failed to read mem status rc=%d\n", rc);
  888.         return false;
  889.     }
  890.  
  891.     if ((mem_if_sts & BIT(FG_MEM_AVAIL)) == 0)
  892.         return false;
  893.  
  894.     rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
  895.     if (rc)
  896.         return false;
  897.  
  898.     return rif_mem_sts;
  899. }
  900.  
  901. static inline int fg_assert_sram_access(struct fg_chip *chip)
  902. {
  903.     int rc;
  904.     u8 mem_if_sts;
  905.  
  906.     rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
  907.     if (rc) {
  908.         pr_err("failed to read mem status rc=%d\n", rc);
  909.         return rc;
  910.     }
  911.  
  912.     if ((mem_if_sts & BIT(FG_MEM_AVAIL)) == 0) {
  913.         pr_err("mem_avail not high: %02x\n", mem_if_sts);
  914.         return -EINVAL;
  915.     }
  916.  
  917.     rc = fg_read(chip, &mem_if_sts, MEM_INTF_CFG(chip), 1);
  918.     if (rc) {
  919.         pr_err("failed to read mem status rc=%d\n", rc);
  920.         return rc;
  921.     }
  922.  
  923.     if ((mem_if_sts & RIF_MEM_ACCESS_REQ) == 0) {
  924.         pr_err("mem_avail not high: %02x\n", mem_if_sts);
  925.         return -EINVAL;
  926.     }
  927.  
  928.     return 0;
  929. }
  930.  
  931. #define INTF_CTL_BURST      BIT(7)
  932. #define INTF_CTL_WR_EN      BIT(6)
  933. static int fg_config_access(struct fg_chip *chip, bool write,
  934.         bool burst, bool otp)
  935. {
  936.     int rc;
  937.     u8 intf_ctl = 0;
  938.  
  939.     if (otp) {
  940.         /* Configure OTP access */
  941.         rc = fg_masked_write(chip, chip->mem_base + OTP_CFG1,
  942.                 0xFF, 0x00, 1);
  943.         if (rc) {
  944.             pr_err("failed to set OTP cfg\n");
  945.             return -EIO;
  946.         }
  947.     }
  948.  
  949.     intf_ctl = (write ? INTF_CTL_WR_EN : 0) | (burst ? INTF_CTL_BURST : 0);
  950.  
  951.     rc = fg_write(chip, &intf_ctl, MEM_INTF_CTL(chip), 1);
  952.     if (rc) {
  953.         pr_err("failed to set mem access bit\n");
  954.         return -EIO;
  955.     }
  956.  
  957.     return rc;
  958. }
  959.  
/*
 * fg_req_and_wait_access() - request SRAM access and block until it is
 * granted or @timeout (ms) expires.
 *
 * If access is not already granted, sets RIF_MEM_ACCESS_REQ and holds
 * the memif wakeup source while waiting for the completion that is
 * signalled by the MEM_AVAIL interrupt.  Returns 0 on grant, -EIO if
 * the request write fails, or -ETIMEDOUT.
 */
static int fg_req_and_wait_access(struct fg_chip *chip, int timeout)
{
    int rc = 0, ret = 0;
    bool tried_again = false;

    if (!fg_check_sram_access(chip)) {
        rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
            RIF_MEM_ACCESS_REQ, RIF_MEM_ACCESS_REQ, 1);
        if (rc) {
            pr_err("failed to set mem access bit\n");
            return -EIO;
        }
        /* keep the SoC awake until access is granted/released */
        fg_stay_awake(&chip->memif_wakeup_source);
    }

wait:
    /* Wait for MEM_AVAIL IRQ. */
    ret = wait_for_completion_interruptible_timeout(
            &chip->sram_access_granted,
            msecs_to_jiffies(timeout));
    /* If we were interrupted wait again one more time. */
    if (ret == -ERESTARTSYS && !tried_again) {
        tried_again = true;
        goto wait;
    } else if (ret <= 0) {
        /* ret == 0 (timeout) or a second interruption */
        rc = -ETIMEDOUT;
        pr_err("transaction timed out rc=%d\n", rc);
        return rc;
    }

    return rc;
}
  992.  
  993. static int fg_release_access(struct fg_chip *chip)
  994. {
  995.     int rc;
  996.  
  997.     if (!chip->shutdown_in_process)
  998.         rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
  999.                      RIF_MEM_ACCESS_REQ, 0, 1);
  1000.  
  1001.     fg_relax(&chip->memif_wakeup_source);
  1002.     reinit_completion(&chip->sram_access_granted);
  1003.  
  1004.     return rc;
  1005. }
  1006.  
  1007. static void fg_release_access_if_necessary(struct fg_chip *chip)
  1008. {
  1009.     mutex_lock(&chip->rw_lock);
  1010.     if (atomic_sub_return(1, &chip->memif_user_cnt) <= 0) {
  1011.         fg_release_access(chip);
  1012.     }
  1013.     mutex_unlock(&chip->rw_lock);
  1014. }
  1015.  
  1016. /*
  1017.  * fg_mem_lock disallows the fuel gauge to release access until it has been
  1018.  * released.
  1019.  *
  1020.  * an equal number of calls must be made to fg_mem_release for the fuel gauge
  1021.  * driver to release the sram access.
  1022.  */
  1023. static void fg_mem_lock(struct fg_chip *chip)
  1024. {
  1025.     mutex_lock(&chip->rw_lock);
  1026.     atomic_add_return(1, &chip->memif_user_cnt);
  1027.     mutex_unlock(&chip->rw_lock);
  1028. }
  1029.  
/* Counterpart of fg_mem_lock(): drop one user reference and release
 * SRAM access when no users remain. */
static void fg_mem_release(struct fg_chip *chip)
{
    fg_release_access_if_necessary(chip);
}
  1034.  
  1035. static int fg_set_ram_addr(struct fg_chip *chip, u16 *address)
  1036. {
  1037.     int rc;
  1038.  
  1039.     rc = fg_write(chip, (u8 *) address,
  1040.         chip->mem_base + chip->offset[MEM_INTF_ADDR_LSB], 2);
  1041.     if (rc) {
  1042.         pr_err("spmi write failed: addr=%03X, rc=%d\n",
  1043.             chip->mem_base + chip->offset[MEM_INTF_ADDR_LSB], rc);
  1044.         return rc;
  1045.     }
  1046.  
  1047.     return rc;
  1048. }
  1049.  
  1050. #define BUF_LEN     4
  1051. static int fg_sub_mem_read(struct fg_chip *chip, u8 *val, u16 address, int len,
  1052.         int offset)
  1053. {
  1054.     int rc, total_len;
  1055.     u8 *rd_data = val;
  1056.     bool otp;
  1057.     char str[DEBUG_PRINT_BUFFER_SIZE];
  1058.  
  1059.     if (address < RAM_OFFSET)
  1060.         otp = true;
  1061.  
  1062.     rc = fg_config_access(chip, 0, (len > 4), otp);
  1063.     if (rc)
  1064.         return rc;
  1065.  
  1066.     rc = fg_set_ram_addr(chip, &address);
  1067.     if (rc)
  1068.         return rc;
  1069.  
  1070.     if (fg_debug_mask & FG_MEM_DEBUG_READS)
  1071.         pr_info("length %d addr=%02X\n", len, address);
  1072.  
  1073.     total_len = len;
  1074.     while (len > 0) {
  1075.         if (!offset) {
  1076.             rc = fg_read(chip, rd_data, MEM_INTF_RD_DATA0(chip),
  1077.                             min(len, BUF_LEN));
  1078.         } else {
  1079.             rc = fg_read(chip, rd_data,
  1080.                 MEM_INTF_RD_DATA0(chip) + offset,
  1081.                 min(len, BUF_LEN - offset));
  1082.  
  1083.             /* manually set address to allow continuous reads */
  1084.             address += BUF_LEN;
  1085.  
  1086.             rc = fg_set_ram_addr(chip, &address);
  1087.             if (rc)
  1088.                 return rc;
  1089.         }
  1090.         if (rc) {
  1091.             pr_err("spmi read failed: addr=%03x, rc=%d\n",
  1092.                 MEM_INTF_RD_DATA0(chip) + offset, rc);
  1093.             return rc;
  1094.         }
  1095.         rd_data += (BUF_LEN - offset);
  1096.         len -= (BUF_LEN - offset);
  1097.         offset = 0;
  1098.     }
  1099.  
  1100.     if (fg_debug_mask & FG_MEM_DEBUG_READS) {
  1101.         fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len);
  1102.         pr_info("data: %s\n", str);
  1103.     }
  1104.     return rc;
  1105. }
  1106.  
/*
 * fg_conventional_mem_read() - read @len bytes at @address[+@offset]
 * through the conventional (non-IMA) memory interface.
 *
 * Aligns the address down to a 4-byte word, takes a user reference on
 * the memory interface, acquires SRAM access if needed, and delegates
 * the actual transfer to fg_sub_mem_read().  Access is released when
 * this was the last user and @keep_access is false.  Returns 0 on
 * success or a negative errno.
 */
static int fg_conventional_mem_read(struct fg_chip *chip, u8 *val, u16 address,
        int len, int offset, bool keep_access)
{
    int rc = 0, user_cnt = 0, orig_address = address;

    if (offset > 3) {
        pr_err("offset too large %d\n", offset);
        return -EINVAL;
    }

    /* word-align the address; fold the remainder into offset */
    address = ((orig_address + offset) / 4) * 4;
    offset = (orig_address + offset) % 4;

    user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
    if (fg_debug_mask & FG_MEM_DEBUG_READS)
        pr_info("user_cnt %d\n", user_cnt);
    mutex_lock(&chip->rw_lock);
    if (!fg_check_sram_access(chip)) {
        rc = fg_req_and_wait_access(chip, MEM_IF_TIMEOUT_MS);
        if (rc)
            goto out;
    }

    rc = fg_sub_mem_read(chip, val, address, len, offset);

out:
    user_cnt = atomic_sub_return(1, &chip->memif_user_cnt);
    if (fg_debug_mask & FG_MEM_DEBUG_READS)
        pr_info("user_cnt %d\n", user_cnt);

    fg_assert_sram_access(chip);

    /* last user out releases access unless the caller wants to keep it */
    if (!keep_access && (user_cnt == 0) && !rc) {
        rc = fg_release_access(chip);
        if (rc) {
            pr_err("failed to set mem access bit\n");
            rc = -EIO;
        }
    }

    mutex_unlock(&chip->rw_lock);
    return rc;
}
  1150.  
/*
 * fg_conventional_mem_write() - write @len bytes at @address[+@offset]
 * through the conventional (non-IMA) memory interface.
 *
 * SRAM writes are always whole 4-byte words: partial words at the head
 * or tail are handled by reading the existing word, merging the new
 * bytes into it, and writing the merged word back.  OTP addresses
 * (below RAM_OFFSET) are rejected.  Access is released when this was
 * the last user and @keep_access is false.  Returns 0 on success or a
 * negative errno.
 */
static int fg_conventional_mem_write(struct fg_chip *chip, u8 *val, u16 address,
        int len, int offset, bool keep_access)
{
    int rc = 0, user_cnt = 0, sublen;
    bool access_configured = false;
    u8 *wr_data = val, word[4];
    u16 orig_address = address;
    char str[DEBUG_PRINT_BUFFER_SIZE];

    /* OTP is not writable via this path */
    if (address < RAM_OFFSET)
        return -EINVAL;

    if (offset > 3)
        return -EINVAL;

    /* word-align the address; fold the remainder into offset */
    address = ((orig_address + offset) / 4) * 4;
    offset = (orig_address + offset) % 4;

    user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
    if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
        pr_info("user_cnt %d\n", user_cnt);
    mutex_lock(&chip->rw_lock);
    if (!fg_check_sram_access(chip)) {
        rc = fg_req_and_wait_access(chip, MEM_IF_TIMEOUT_MS);
        if (rc)
            goto out;
    }

    if (fg_debug_mask & FG_MEM_DEBUG_WRITES) {
        pr_info("length %d addr=%02X offset=%d\n",
                len, address, offset);
        fill_string(str, DEBUG_PRINT_BUFFER_SIZE, wr_data, len);
        pr_info("writing: %s\n", str);
    }

    while (len > 0) {
        if (offset != 0) {
            /* unaligned head: read-merge-write the first word */
            sublen = min(4 - offset, len);
            rc = fg_sub_mem_read(chip, word, address, 4, 0);
            if (rc)
                goto out;
            memcpy(word + offset, wr_data, sublen);
            /* configure access as burst if more to write */
            rc = fg_config_access(chip, 1, (len - sublen) > 0, 0);
            if (rc)
                goto out;
            rc = fg_set_ram_addr(chip, &address);
            if (rc)
                goto out;
            offset = 0;
            access_configured = true;
        } else if (len >= 4) {
            /* aligned full word: write straight through */
            if (!access_configured) {
                rc = fg_config_access(chip, 1, len > 4, 0);
                if (rc)
                    goto out;
                rc = fg_set_ram_addr(chip, &address);
                if (rc)
                    goto out;
                access_configured = true;
            }
            sublen = 4;
            memcpy(word, wr_data, 4);
        } else if (len > 0 && len < 4) {
            /* short tail: read-merge-write the last word */
            sublen = len;
            rc = fg_sub_mem_read(chip, word, address, 4, 0);
            if (rc)
                goto out;
            memcpy(word, wr_data, sublen);
            rc = fg_config_access(chip, 1, 0, 0);
            if (rc)
                goto out;
            rc = fg_set_ram_addr(chip, &address);
            if (rc)
                goto out;
            access_configured = true;
        } else {
            pr_err("Invalid length: %d\n", len);
            break;
        }
        rc = fg_write(chip, word, MEM_INTF_WR_DATA0(chip), 4);
        if (rc) {
            pr_err("spmi write failed: addr=%03x, rc=%d\n",
                    MEM_INTF_WR_DATA0(chip), rc);
            goto out;
        }
        len -= sublen;
        wr_data += sublen;
        address += 4;
    }

out:
    user_cnt = atomic_sub_return(1, &chip->memif_user_cnt);
    if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
        pr_info("user_cnt %d\n", user_cnt);

    fg_assert_sram_access(chip);

    /* last user out releases access unless the caller wants to keep it */
    if (!keep_access && (user_cnt == 0) && !rc) {
        rc = fg_release_access(chip);
        if (rc) {
            pr_err("failed to set mem access bit\n");
            rc = -EIO;
        }
    }

    mutex_unlock(&chip->rw_lock);
    return rc;
}
  1260.  
  1261. #define MEM_INTF_IMA_CFG        0x52
  1262. #define MEM_INTF_IMA_OPR_STS        0x54
  1263. #define MEM_INTF_IMA_ERR_STS        0x5F
  1264. #define MEM_INTF_IMA_EXP_STS        0x55
  1265. #define MEM_INTF_IMA_HW_STS     0x56
  1266. #define MEM_INTF_IMA_BYTE_EN        0x60
  1267. #define IMA_IACS_CLR            BIT(2)
  1268. #define IMA_IACS_RDY            BIT(1)
  1269. static int fg_run_iacs_clear_sequence(struct fg_chip *chip)
  1270. {
  1271.     int rc = 0;
  1272.     u8 temp;
  1273.  
  1274.     if (fg_debug_mask & FG_STATUS)
  1275.         pr_info("Running IACS clear sequence\n");
  1276.  
  1277.     /* clear the error */
  1278.     rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
  1279.                 IMA_IACS_CLR, IMA_IACS_CLR, 1);
  1280.     if (rc) {
  1281.         pr_err("Error writing to IMA_CFG, rc=%d\n", rc);
  1282.         return rc;
  1283.     }
  1284.  
  1285.     temp = 0x4;
  1286.     rc = fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1);
  1287.     if (rc) {
  1288.         pr_err("Error writing to MEM_INTF_ADDR_MSB, rc=%d\n", rc);
  1289.         return rc;
  1290.     }
  1291.  
  1292.     temp = 0x0;
  1293.     rc = fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1);
  1294.     if (rc) {
  1295.         pr_err("Error writing to WR_DATA3, rc=%d\n", rc);
  1296.         return rc;
  1297.     }
  1298.  
  1299.     rc = fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1);
  1300.     if (rc) {
  1301.         pr_err("Error writing to RD_DATA3, rc=%d\n", rc);
  1302.         return rc;
  1303.     }
  1304.  
  1305.     rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
  1306.                 IMA_IACS_CLR, 0, 1);
  1307.     if (rc) {
  1308.         pr_err("Error writing to IMA_CFG, rc=%d\n", rc);
  1309.         return rc;
  1310.     }
  1311.  
  1312.     if (fg_debug_mask & FG_STATUS)
  1313.         pr_info("IACS clear sequence complete!\n");
  1314.     return rc;
  1315. }
  1316.  
#define IACS_ERR_BIT        BIT(0)
#define XCT_ERR_BIT     BIT(1)
#define DATA_RD_ERR_BIT     BIT(3)
#define DATA_WR_ERR_BIT     BIT(4)
#define ADDR_BURST_WRAP_BIT BIT(5)
#define ADDR_RNG_ERR_BIT    BIT(6)
#define ADDR_SRC_ERR_BIT    BIT(7)
/*
 * fg_check_ima_exception() - read the IMA error/exception/HW status
 * registers and, if any exception bit is set (or, when @check_hw_sts,
 * the HW state nibbles disagree), run the IACS clear sequence.
 *
 * Returns 0 when no error (or when cleared in the @check_hw_sts case),
 * -EAGAIN after clearing so the caller can retry the transaction, or a
 * negative errno on register access failure.
 */
static int fg_check_ima_exception(struct fg_chip *chip, bool check_hw_sts)
{
    int rc = 0, ret = 0;
    u8 err_sts = 0, exp_sts = 0, hw_sts = 0;
    bool run_err_clr_seq = false;

    rc = fg_read(chip, &err_sts,
            chip->mem_base + MEM_INTF_IMA_ERR_STS, 1);
    if (rc) {
        pr_err("failed to read IMA_ERR_STS, rc=%d\n", rc);
        return rc;
    }

    rc = fg_read(chip, &exp_sts,
            chip->mem_base + MEM_INTF_IMA_EXP_STS, 1);
    if (rc) {
        pr_err("Error in reading IMA_EXP_STS, rc=%d\n", rc);
        return rc;
    }

    rc = fg_read(chip, &hw_sts,
            chip->mem_base + MEM_INTF_IMA_HW_STS, 1);
    if (rc) {
        pr_err("Error in reading IMA_HW_STS, rc=%d\n", rc);
        return rc;
    }

    /* log the very first snapshot once for post-mortem debugging */
    pr_info_once("Initial ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
        err_sts, exp_sts, hw_sts);

    if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
        pr_info("ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
            err_sts, exp_sts, hw_sts);

    if (check_hw_sts) {
        /*
         * Lower nibble should be equal to upper nibble before SRAM
         * transactions begins from SW side. If they are unequal, then
         * the error clear sequence should be run irrespective of IMA
         * exception errors.
         */
        if ((hw_sts & 0x0F) != hw_sts >> 4) {
            pr_err("IMA HW not in correct state, hw_sts=%x\n",
                hw_sts);
            run_err_clr_seq = true;
        }
    }

    if (exp_sts & (IACS_ERR_BIT | XCT_ERR_BIT | DATA_RD_ERR_BIT |
        DATA_WR_ERR_BIT | ADDR_BURST_WRAP_BIT | ADDR_RNG_ERR_BIT |
        ADDR_SRC_ERR_BIT)) {
        pr_err("IMA exception bit set, exp_sts=%x\n", exp_sts);
        run_err_clr_seq = true;
    }

    if (run_err_clr_seq) {
        ret = fg_run_iacs_clear_sequence(chip);
        if (ret) {
            pr_err("Error clearing IMA exception ret=%d\n", ret);
            return ret;
        }

        /* -EAGAIN tells the caller to retry the failed transaction */
        if (check_hw_sts)
            return 0;
        else
            return -EAGAIN;
    }

    return rc;
}
  1394.  
/*
 * fg_enable_irqs() - enable or disable the FG interrupt set as a group
 * (DELTA_SOC, FULL_SOC, BATT_MISSING, VBATT_LOW, and EMPTY_SOC unless
 * VBATT_LOW is used for empty detection), tracking per-IRQ state flags
 * so individual IRQs are never double-enabled/disabled.  No-op if the
 * requested state already matches chip->irqs_enabled.
 */
static void fg_enable_irqs(struct fg_chip *chip, bool enable)
{
    /* already in the requested state */
    if (!(enable ^ chip->irqs_enabled))
        return;

    if (enable) {
        enable_irq(chip->soc_irq[DELTA_SOC].irq);
        enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
        if (!chip->full_soc_irq_enabled) {
            enable_irq(chip->soc_irq[FULL_SOC].irq);
            enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
            chip->full_soc_irq_enabled = true;
        }
        enable_irq(chip->batt_irq[BATT_MISSING].irq);
        if (!chip->vbat_low_irq_enabled) {
            enable_irq(chip->batt_irq[VBATT_LOW].irq);
            enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
            chip->vbat_low_irq_enabled = true;
        }
        /* EMPTY_SOC irq only used when VBATT_LOW doesn't cover empty */
        if (!chip->use_vbat_low_empty_soc) {
            enable_irq(chip->soc_irq[EMPTY_SOC].irq);
            enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
        }
        chip->irqs_enabled = true;
    } else {
        disable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
        disable_irq_nosync(chip->soc_irq[DELTA_SOC].irq);
        if (chip->full_soc_irq_enabled) {
            disable_irq_wake(chip->soc_irq[FULL_SOC].irq);
            disable_irq_nosync(chip->soc_irq[FULL_SOC].irq);
            chip->full_soc_irq_enabled = false;
        }
        disable_irq(chip->batt_irq[BATT_MISSING].irq);
        if (chip->vbat_low_irq_enabled) {
            disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
            disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
            chip->vbat_low_irq_enabled = false;
        }
        if (!chip->use_vbat_low_empty_soc) {
            disable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
            disable_irq_nosync(chip->soc_irq[EMPTY_SOC].irq);
        }
        chip->irqs_enabled = false;
    }

    if (fg_debug_mask & FG_STATUS)
        pr_info("FG interrupts are %sabled\n", enable ? "en" : "dis");
}
  1443.  
  1444. static void fg_check_ima_error_handling(struct fg_chip *chip)
  1445. {
  1446.     if (chip->ima_error_handling) {
  1447.         if (fg_debug_mask & FG_STATUS)
  1448.             pr_info("IMA error is handled already!\n");
  1449.         return;
  1450.     }
  1451.     mutex_lock(&chip->ima_recovery_lock);
  1452.     fg_enable_irqs(chip, false);
  1453.     chip->use_last_cc_soc = true;
  1454.     chip->ima_error_handling = true;
  1455.     if (!work_pending(&chip->ima_error_recovery_work))
  1456.         schedule_work(&chip->ima_error_recovery_work);
  1457.     mutex_unlock(&chip->ima_recovery_lock);
  1458. }
  1459.  
  1460. #define SOC_ALG_ST      0xCF
  1461. #define FGXCT_PRD       BIT(7)
  1462. #define ALG_ST_CHECK_COUNT  20
  1463. static int fg_check_alg_status(struct fg_chip *chip)
  1464. {
  1465.     int rc = 0, timeout = ALG_ST_CHECK_COUNT, count = 0;
  1466.     u8 ima_opr_sts, alg_sts = 0, temp = 0;
  1467.  
  1468.     if (!fg_reset_on_lockup)  {
  1469.         pr_info("FG lockup detection cannot be run\n");
  1470.         return 0;
  1471.     }
  1472.  
  1473.     rc = fg_read(chip, &alg_sts, chip->soc_base + SOC_ALG_ST, 1);
  1474.     if (rc) {
  1475.         pr_err("Error in reading SOC_ALG_ST, rc=%d\n", rc);
  1476.         return rc;
  1477.     }
  1478.  
  1479.     while (1) {
  1480.         rc = fg_read(chip, &ima_opr_sts,
  1481.             chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
  1482.         if (!rc && !(ima_opr_sts & FGXCT_PRD))
  1483.             break;
  1484.  
  1485.         if (rc) {
  1486.             pr_err("Error in reading IMA_OPR_STS, rc=%d\n",
  1487.                 rc);
  1488.             break;
  1489.         }
  1490.  
  1491.         rc = fg_read(chip, &temp, chip->soc_base + SOC_ALG_ST,
  1492.             1);
  1493.         if (rc) {
  1494.             pr_err("Error in reading SOC_ALG_ST, rc=%d\n",
  1495.                 rc);
  1496.             break;
  1497.         }
  1498.  
  1499.         if ((ima_opr_sts & FGXCT_PRD) && (temp == alg_sts))
  1500.             count++;
  1501.  
  1502.         /* Wait for ~10ms while polling ALG_ST & IMA_OPR_STS */
  1503.         usleep_range(9000, 11000);
  1504.  
  1505.         if (!(--timeout))
  1506.             break;
  1507.     }
  1508.  
  1509.     if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
  1510.         pr_info("ima_opr_sts: %x  alg_sts: %x count=%d\n", ima_opr_sts,
  1511.             alg_sts, count);
  1512.  
  1513.     if (count == ALG_ST_CHECK_COUNT) {
  1514.         /* If we are here, that means FG ALG is stuck */
  1515.         pr_err("ALG is stuck\n");
  1516.         fg_check_ima_error_handling(chip);
  1517.         rc = -EBUSY;
  1518.     }
  1519.     return rc;
  1520. }
  1521.  
/*
 * fg_check_iacs_ready() - poll IMA_OPR_STS until IMA_IACS_RDY asserts
 * (up to ~250 * 5-7ms).  On timeout or read failure, checks the FG ALG
 * for lockup and runs the IACS clear sequence, then returns -EBUSY so
 * the caller aborts/retries the transaction.  Returns 0 when ready.
 */
static int fg_check_iacs_ready(struct fg_chip *chip)
{
    int rc = 0, timeout = 250;
    u8 ima_opr_sts = 0;

    /*
     * Additional delay to make sure IACS ready bit is set after
     * Read/Write operation.
     */

    usleep_range(30, 35);
    while (1) {
        rc = fg_read(chip, &ima_opr_sts,
            chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
        if (!rc && (ima_opr_sts & IMA_IACS_RDY)) {
            break;
        } else {
            /* bail on read failure or exhausted retries */
            if (!(--timeout) || rc)
                break;

            /* delay for iacs_ready to be asserted */
            usleep_range(5000, 7000);
        }
    }

    if (!timeout || rc) {
        pr_err("IACS_RDY not set, ima_opr_sts: %x\n", ima_opr_sts);
        rc = fg_check_alg_status(chip);
        if (rc && rc != -EBUSY)
            pr_err("Couldn't check FG ALG status, rc=%d\n",
                rc);
        /* perform IACS_CLR sequence */
        fg_check_ima_exception(chip, false);
        return -EBUSY;
    }

    return 0;
}
  1560.  
  1561. #define IACS_SLCT           BIT(5)
  1562. static int __fg_interleaved_mem_write(struct fg_chip *chip, u8 *val,
  1563.                 u16 address, int offset, int len)
  1564. {
  1565.     int rc = 0, i;
  1566.     u8 *word = val, byte_enable = 0, num_bytes = 0;
  1567.  
  1568.     if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
  1569.         pr_info("length %d addr=%02X offset=%d\n",
  1570.                     len, address, offset);
  1571.  
  1572.     while (len > 0) {
  1573.         num_bytes = (offset + len) > BUF_LEN ?
  1574.             (BUF_LEN - offset) : len;
  1575.         /* write to byte_enable */
  1576.         for (i = offset; i < (offset + num_bytes); i++)
  1577.             byte_enable |= BIT(i);
  1578.  
  1579.         rc = fg_write(chip, &byte_enable,
  1580.             chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1);
  1581.         if (rc) {
  1582.             pr_err("Unable to write to byte_en_reg rc=%d\n",
  1583.                             rc);
  1584.             return rc;
  1585.         }
  1586.             /* write data */
  1587.         rc = fg_write(chip, word, MEM_INTF_WR_DATA0(chip) + offset,
  1588.                 num_bytes);
  1589.         if (rc) {
  1590.             pr_err("spmi write failed: addr=%03x, rc=%d\n",
  1591.                 MEM_INTF_WR_DATA0(chip) + offset, rc);
  1592.             return rc;
  1593.         }
  1594.         /*
  1595.          * The last-byte WR_DATA3 starts the write transaction.
  1596.          * Write a dummy value to WR_DATA3 if it does not have
  1597.          * valid data. This dummy data is not written to the
  1598.          * SRAM as byte_en for WR_DATA3 is not set.
  1599.          */
  1600.         if (!(byte_enable & BIT(3))) {
  1601.  
  1602.             u8 dummy_byte = 0x0;
  1603.             rc = fg_write(chip, &dummy_byte,
  1604.                 MEM_INTF_WR_DATA0(chip) + 3, 1);
  1605.             if (rc) {
  1606.                 pr_err("Unable to write dummy-data to WR_DATA3 rc=%d\n",
  1607.                                     rc);
  1608.                 return rc;
  1609.             }
  1610.         }
  1611.  
  1612.         rc = fg_check_iacs_ready(chip);
  1613.         if (rc) {
  1614.             pr_err("IACS_RDY failed post write to address %x offset %d rc=%d\n",
  1615.                 address, offset, rc);
  1616.             return rc;
  1617.         }
  1618.  
  1619.         /* check for error condition */
  1620.         rc = fg_check_ima_exception(chip, false);
  1621.         if (rc) {
  1622.             pr_err("IMA transaction failed rc=%d", rc);
  1623.             return rc;
  1624.         }
  1625.  
  1626.         word += num_bytes;
  1627.         len -= num_bytes;
  1628.         offset = byte_enable = 0;
  1629.     }
  1630.  
  1631.     return rc;
  1632. }
  1633.  
  1634. static int __fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
  1635.                         int offset, int len)
  1636. {
  1637.     int rc = 0, total_len;
  1638.     u8 *rd_data = val, num_bytes;
  1639.     char str[DEBUG_PRINT_BUFFER_SIZE];
  1640.  
  1641.     if (fg_debug_mask & FG_MEM_DEBUG_READS)
  1642.         pr_info("length %d addr=%02X\n", len, address);
  1643.  
  1644.     total_len = len;
  1645.     while (len > 0) {
  1646.         num_bytes = (offset + len) > BUF_LEN ? (BUF_LEN - offset) : len;
  1647.         rc = fg_read(chip, rd_data, MEM_INTF_RD_DATA0(chip) + offset,
  1648.                                 num_bytes);
  1649.         if (rc) {
  1650.             pr_err("spmi read failed: addr=%03x, rc=%d\n",
  1651.                 MEM_INTF_RD_DATA0(chip) + offset, rc);
  1652.             return rc;
  1653.         }
  1654.  
  1655.         rd_data += num_bytes;
  1656.         len -= num_bytes;
  1657.         offset = 0;
  1658.  
  1659.         rc = fg_check_iacs_ready(chip);
  1660.         if (rc) {
  1661.             pr_err("IACS_RDY failed post read for address %x offset %d rc=%d\n",
  1662.                 address, offset, rc);
  1663.             return rc;
  1664.         }
  1665.  
  1666.         /* check for error condition */
  1667.         rc = fg_check_ima_exception(chip, false);
  1668.         if (rc) {
  1669.             pr_err("IMA transaction failed rc=%d", rc);
  1670.             return rc;
  1671.         }
  1672.  
  1673.         if (len && (len + offset) < BUF_LEN) {
  1674.             /* move to single mode */
  1675.             u8 intr_ctl = 0;
  1676.  
  1677.             rc = fg_write(chip, &intr_ctl, MEM_INTF_CTL(chip), 1);
  1678.             if (rc) {
  1679.                 pr_err("failed to move to single mode rc=%d\n",
  1680.                                     rc);
  1681.                 return -EIO;
  1682.             }
  1683.         }
  1684.     }
  1685.  
  1686.     if (fg_debug_mask & FG_MEM_DEBUG_READS) {
  1687.         fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len);
  1688.         pr_info("data: %s\n", str);
  1689.     }
  1690.  
  1691.     return rc;
  1692. }
  1693.  
#define IMA_REQ_ACCESS      (IACS_SLCT | RIF_MEM_ACCESS_REQ)
/*
 * fg_interleaved_mem_config() - prepare the IMA interface for a
 * read (@op == 0) or write (@op == 1) transaction at @address.
 *
 * Polls until any previous RIF_MEM_ACCESS_REQ has cleared (max ~16ms),
 * requests IMA access, configures single/burst direction, waits for
 * IACS ready, latches the SRAM address, and waits for ready again.
 * Returns 0 on success or a negative errno (-ETIMEDOUT if the previous
 * request never clears).
 */
static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val,
        u16 address, int len, int offset, int op)
{
    int rc = 0;
    bool rif_mem_sts = true;
    int time_count = 0;

    while (1) {
        rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
        if (rc)
            return rc;

        /* previous access request has cleared; proceed */
        if (!rif_mem_sts)
            break;

        if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
            pr_info("RIF_MEM_ACCESS_REQ is not clear yet for IMA_%s\n",
                op ? "write" : "read");

        /*
         * Try this no more than 4 times. If RIF_MEM_ACCESS_REQ is not
         * clear, then return an error instead of waiting for it again.
         */
        if  (time_count > 4) {
            pr_err("Waited for ~16ms polling RIF_MEM_ACCESS_REQ\n");
            return -ETIMEDOUT;
        }

        /* Wait for 4ms before reading RIF_MEM_ACCESS_REQ again */
        usleep_range(4000, 4100);
        time_count++;
    }

    /* configure for IMA access */
    rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
                IMA_REQ_ACCESS, IMA_REQ_ACCESS, 1);
    if (rc) {
        pr_err("failed to set mem access bit rc = %d\n", rc);
        return rc;
    }

    /* configure for the read/write single/burst mode */
    rc = fg_config_access(chip, op, (offset + len) > 4, 0);
    if (rc) {
        pr_err("failed to set configure memory access rc = %d\n", rc);
        return rc;
    }

    rc = fg_check_iacs_ready(chip);
    if (rc) {
        pr_err("IACS_RDY failed before setting address: %x offset: %d rc=%d\n",
            address, offset, rc);
        return rc;
    }

    /* write addresses to the register */
    rc = fg_set_ram_addr(chip, &address);
    if (rc) {
        pr_err("failed to set SRAM address rc = %d\n", rc);
        return rc;
    }

    rc = fg_check_iacs_ready(chip);
    if (rc)
        pr_err("IACS_RDY failed after setting address: %x offset: %d rc=%d\n",
            address, offset, rc);

    return rc;
}
  1764.  
#define MEM_INTF_FG_BEAT_COUNT      0x57
#define BEAT_COUNT_MASK         0x0F
#define RETRY_COUNT         3
/*
 * fg_interleaved_mem_read() - read FG SRAM through the interleaved memory
 * access (IMA) interface.
 *
 * The address is word-aligned and the remainder folded into a byte offset
 * (the IMA engine transfers 4-byte words). Addresses below RAM_OFFSET live
 * in OTP and are read through the conventional path instead. A read is
 * considered torn if the FG "beat" counter changed while the transfer was
 * in flight, in which case the whole transaction is retried, up to
 * RETRY_COUNT times. Returns 0 on success or a negative errno.
 */
static int fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
                        int len, int offset)
{
    int rc = 0, ret, orig_address = address;
    u8 start_beat_count, end_beat_count, count = 0;
    bool retry = false;

    /* Reject new accesses once the driver has begun shutting down. */
    if (chip->fg_shutdown)
        return -EINVAL;

    if (offset > 3) {
        pr_err("offset too large %d\n", offset);
        return -EINVAL;
    }

    fg_stay_awake(&chip->memif_wakeup_source);
    /* Align to a 4-byte word; carry the remainder as the byte offset. */
    address = ((orig_address + offset) / 4) * 4;
    offset = (orig_address + offset) % 4;

    if (address < RAM_OFFSET) {
        /*
         * OTP memory reads need a conventional memory access, do a
         * conventional read when SRAM offset < RAM_OFFSET.
         */
        rc = fg_conventional_mem_read(chip, val, address, len, offset,
                        0);
        if (rc)
            pr_err("Failed to read OTP memory %d\n", rc);
        goto exit;
    }

    mutex_lock(&chip->rw_lock);
    if (fg_debug_mask & FG_MEM_DEBUG_READS)
        pr_info("Read for %d bytes is attempted @ 0x%x[%d]\n",
            len, address, offset);

retry:
    /* Give up after RETRY_COUNT full transaction attempts. */
    if (count >= RETRY_COUNT) {
        pr_err("Retried reading 3 times\n");
        retry = false;
        goto out;
    }

    rc = fg_interleaved_mem_config(chip, val, address, offset, len, 0);
    if (rc) {
        pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
        retry = true;
        count++;
        goto out;
    }

    /* read the start beat count */
    rc = fg_read(chip, &start_beat_count,
            chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
    if (rc) {
        pr_err("failed to read beat count rc=%d\n", rc);
        retry = true;
        count++;
        goto out;
    }

    /* read data */
    rc = __fg_interleaved_mem_read(chip, val, address, offset, len);
    if (rc) {
        count++;
        if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
            pr_err("IMA access failed retry_count = %d\n", count);
            goto retry;
        } else {
            pr_err("failed to read SRAM address rc = %d\n", rc);
            retry = true;
            goto out;
        }
    }

    /* read the end beat count */
    rc = fg_read(chip, &end_beat_count,
            chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
    if (rc) {
        pr_err("failed to read beat count rc=%d\n", rc);
        retry = true;
        count++;
        goto out;
    }

    start_beat_count &= BEAT_COUNT_MASK;
    end_beat_count &= BEAT_COUNT_MASK;
    if (fg_debug_mask & FG_MEM_DEBUG_READS)
        pr_info("Start beat_count = %x End beat_count = %x\n",
                start_beat_count, end_beat_count);
    /*
     * If the beat counter advanced during the transfer, the FG may have
     * updated SRAM mid-read; discard the data and retry.
     */
    if (start_beat_count != end_beat_count) {
        if (fg_debug_mask & FG_MEM_DEBUG_READS)
            pr_info("Beat count do not match - retry transaction\n");
        retry = true;
        count++;
    }
out:
    /* Release IMA access */
    ret = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
    if (ret)
        pr_err("failed to reset IMA access bit ret = %d\n", ret);

    /* The retry path re-acquires IMA access via the retry label above. */
    if (retry) {
        retry = false;
        goto retry;
    }
    mutex_unlock(&chip->rw_lock);

exit:
    fg_relax(&chip->memif_wakeup_source);
    return rc;
}
  1880.  
/*
 * fg_interleaved_mem_write() - write FG SRAM through the interleaved
 * memory access (IMA) interface.
 *
 * Mirrors fg_interleaved_mem_read(): the address is word-aligned with the
 * remainder carried as a byte offset, the transaction is retried up to
 * RETRY_COUNT times on failure, and IMA access is always released before
 * returning. OTP addresses (below RAM_OFFSET) are not writable here.
 * Returns 0 on success or a negative errno.
 */
static int fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, u16 address,
                            int len, int offset)
{
    int rc = 0, ret, orig_address = address;
    u8 count = 0;
    bool retry = false;

    /* Reject new accesses once the driver has begun shutting down. */
    if (chip->fg_shutdown)
        return -EINVAL;

    /* OTP region is read-only through this driver. */
    if (address < RAM_OFFSET)
        return -EINVAL;

    if (offset > 3) {
        pr_err("offset too large %d\n", offset);
        return -EINVAL;
    }

    fg_stay_awake(&chip->memif_wakeup_source);
    /* Align to a 4-byte word; carry the remainder as the byte offset. */
    address = ((orig_address + offset) / 4) * 4;
    offset = (orig_address + offset) % 4;

    mutex_lock(&chip->rw_lock);
    if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
        pr_info("Write for %d bytes is attempted @ 0x%x[%d]\n",
            len, address, offset);

retry:
    /* Give up after RETRY_COUNT full transaction attempts. */
    if (count >= RETRY_COUNT) {
        pr_err("Retried writing 3 times\n");
        retry = false;
        goto out;
    }

    rc = fg_interleaved_mem_config(chip, val, address, offset, len, 1);
    if (rc) {
        pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
        retry = true;
        count++;
        goto out;
    }

    /* write data */
    rc = __fg_interleaved_mem_write(chip, val, address, offset, len);
    if (rc) {
        count++;
        if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
            pr_err("IMA access failed retry_count = %d\n", count);
            goto retry;
        } else {
            pr_err("failed to write SRAM address rc = %d\n", rc);
            retry = true;
            goto out;
        }
    }

out:
    /* Release IMA access */
    ret = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
    if (ret)
        pr_err("failed to reset IMA access bit ret = %d\n", ret);

    /* The retry path re-acquires IMA access via the retry label above. */
    if (retry) {
        retry = false;
        goto retry;
    }

    mutex_unlock(&chip->rw_lock);
    fg_relax(&chip->memif_wakeup_source);
    return rc;
}
  1952.  
  1953. static int fg_mem_read(struct fg_chip *chip, u8 *val, u16 address,
  1954.             int len, int offset, bool keep_access)
  1955. {
  1956.     if (chip->block_sram_access)
  1957.         return -EBUSY;
  1958.  
  1959.     if (chip->ima_supported)
  1960.         return fg_interleaved_mem_read(chip, val, address,
  1961.                         len, offset);
  1962.     else
  1963.         return fg_conventional_mem_read(chip, val, address,
  1964.                     len, offset, keep_access);
  1965. }
  1966.  
  1967. static int fg_mem_write(struct fg_chip *chip, u8 *val, u16 address,
  1968.         int len, int offset, bool keep_access)
  1969. {
  1970.     if (chip->block_sram_access)
  1971.         return -EBUSY;
  1972.  
  1973.     if (chip->ima_supported)
  1974.         return fg_interleaved_mem_write(chip, val, address,
  1975.                         len, offset);
  1976.     else
  1977.         return fg_conventional_mem_write(chip, val, address,
  1978.                     len, offset, keep_access);
  1979. }
  1980.  
  1981. static int fg_mem_masked_write(struct fg_chip *chip, u16 addr,
  1982.         u8 mask, u8 val, u8 offset)
  1983. {
  1984.     int rc = 0;
  1985.     u8 reg[4];
  1986.     char str[DEBUG_PRINT_BUFFER_SIZE];
  1987.  
  1988.     rc = fg_mem_read(chip, reg, addr, 4, 0, 1);
  1989.     if (rc) {
  1990.         pr_err("spmi read failed: addr=%03X, rc=%d\n", addr, rc);
  1991.         return rc;
  1992.     }
  1993.  
  1994.     reg[offset] &= ~mask;
  1995.     reg[offset] |= val & mask;
  1996.  
  1997.     str[0] = '\0';
  1998.     fill_string(str, DEBUG_PRINT_BUFFER_SIZE, reg, 4);
  1999.     pr_debug("Writing %s address %03x, offset %d\n", str, addr, offset);
  2000.  
  2001.     rc = fg_mem_write(chip, reg, addr, 4, 0, 0);
  2002.     if (rc) {
  2003.         pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);
  2004.         return rc;
  2005.     }
  2006.  
  2007.     return rc;
  2008. }
  2009.  
  2010. static u8 sram_backup_buffer[100];
  2011. static int fg_backup_sram_registers(struct fg_chip *chip, bool save)
  2012. {
  2013.     int rc, i, len, offset;
  2014.     u16 address;
  2015.     u8 *ptr;
  2016.  
  2017.     if (fg_debug_mask & FG_STATUS)
  2018.         pr_info("%sing SRAM registers\n", save ? "Back" : "Restor");
  2019.  
  2020.     ptr = sram_backup_buffer;
  2021.     for (i = 0; i < FG_BACKUP_MAX; i++) {
  2022.         address = fg_backup_regs[i].address;
  2023.         offset = fg_backup_regs[i].offset;
  2024.         len = fg_backup_regs[i].len;
  2025.         if (save)
  2026.             rc = fg_interleaved_mem_read(chip, ptr, address,
  2027.                     len, offset);
  2028.         else
  2029.             rc = fg_interleaved_mem_write(chip, ptr, address,
  2030.                     len, offset);
  2031.         if (rc) {
  2032.             pr_err("Error in reading %d bytes from %x[%d], rc=%d\n",
  2033.                 len, address, offset, rc);
  2034.             break;
  2035.         }
  2036.         ptr += len;
  2037.     }
  2038.  
  2039.     return rc;
  2040. }
  2041.  
  2042. #define SOC_FG_RESET    0xF3
  2043. #define RESET_MASK  (BIT(7) | BIT(5))
  2044. static int fg_reset(struct fg_chip *chip, bool reset)
  2045. {
  2046.     int rc;
  2047.  
  2048.     rc = fg_sec_masked_write(chip, chip->soc_base + SOC_FG_RESET,
  2049.         0xFF, reset ? RESET_MASK : 0, 1);
  2050.     if (rc)
  2051.         pr_err("Error in writing to 0x%x, rc=%d\n", SOC_FG_RESET, rc);
  2052.  
  2053.     return rc;
  2054. }
  2055.  
/*
 * fg_handle_battery_insertion() - react to a battery (re)insertion.
 *
 * Re-arms the battery-id and FG-reset completions, schedules profile
 * loading immediately, and forces an immediate SRAM data refresh (the
 * pending refresh is cancelled first so the new one runs right away).
 */
static void fg_handle_battery_insertion(struct fg_chip *chip)
{
    reinit_completion(&chip->batt_id_avail);
    reinit_completion(&chip->fg_reset_done);
    schedule_delayed_work(&chip->batt_profile_init, 0);
    cancel_delayed_work(&chip->update_sram_data);
    schedule_delayed_work(&chip->update_sram_data, msecs_to_jiffies(0));
}
  2064.  
  2065. static void batt_to_setpoint_adc(int vbatt_mv, u8 *data)
  2066. {
  2067.     int val;
  2068.     /* Battery voltage is an offset from 0 V and LSB is 1/2^15. */
  2069.     val = DIV_ROUND_CLOSEST(vbatt_mv * 32768, 5000);
  2070.     data[0] = val & 0xFF;
  2071.     data[1] = val >> 8;
  2072.     return;
  2073. }
  2074.  
  2075. static u8 batt_to_setpoint_8b(int vbatt_mv)
  2076. {
  2077.     int val;
  2078.     /* Battery voltage is an offset from 2.5 V and LSB is 5/2^9. */
  2079.     val = (vbatt_mv - 2500) * 512 / 1000;
  2080.     return DIV_ROUND_CLOSEST(val, 5);
  2081. }
  2082.  
  2083. static u8 therm_delay_to_setpoint(u32 delay_us)
  2084. {
  2085.     u8 val;
  2086.  
  2087.     if (delay_us < 2560)
  2088.         val = 0;
  2089.     else if (delay_us > 163840)
  2090.         val = 7;
  2091.     else
  2092.         val = ilog2(delay_us / 10) - 7;
  2093.     return val << 5;
  2094. }
  2095.  
  2096. static int get_current_time(unsigned long *now_tm_sec)
  2097. {
  2098.     struct rtc_time tm;
  2099.     struct rtc_device *rtc;
  2100.     int rc;
  2101.  
  2102.     rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
  2103.     if (rtc == NULL) {
  2104.         pr_err("%s: unable to open rtc device (%s)\n",
  2105.             __FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
  2106.         return -EINVAL;
  2107.     }
  2108.  
  2109.     rc = rtc_read_time(rtc, &tm);
  2110.     if (rc) {
  2111.         pr_err("Error reading rtc device (%s) : %d\n",
  2112.             CONFIG_RTC_HCTOSYS_DEVICE, rc);
  2113.         goto close_time;
  2114.     }
  2115.  
  2116.     rc = rtc_valid_tm(&tm);
  2117.     if (rc) {
  2118.         pr_err("Invalid RTC time (%s): %d\n",
  2119.             CONFIG_RTC_HCTOSYS_DEVICE, rc);
  2120.         goto close_time;
  2121.     }
  2122.     rtc_tm_to_time(&tm, now_tm_sec);
  2123.  
  2124. close_time:
  2125.     rtc_class_close(rtc);
  2126.     return rc;
  2127. }
  2128.  
  2129. #define BATTERY_SOC_REG     0x56C
  2130. #define BATTERY_SOC_OFFSET  1
  2131. #define FULL_PERCENT_3B     0xFFFFFF
  2132. static int get_battery_soc_raw(struct fg_chip *chip)
  2133. {
  2134.     int rc;
  2135.     u8 buffer[3];
  2136.  
  2137.     rc = fg_mem_read(chip, buffer, BATTERY_SOC_REG, 3, 1, 0);
  2138.     if (rc) {
  2139.         pr_err("Unable to read battery soc: %d\n", rc);
  2140.         return 0;
  2141.     }
  2142.     return (int)(buffer[2] << 16 | buffer[1] << 8 | buffer[0]);
  2143. }
  2144.  
  2145. #define COUNTER_IMPTR_REG   0X558
  2146. #define COUNTER_PULSE_REG   0X55C
  2147. #define SOC_FULL_REG        0x564
  2148. #define COUNTER_IMPTR_OFFSET    2
  2149. #define COUNTER_PULSE_OFFSET    0
  2150. #define SOC_FULL_OFFSET     3
  2151. #define ESR_PULSE_RECONFIG_SOC  0xFFF971
  2152. static int fg_configure_soc(struct fg_chip *chip)
  2153. {
  2154.     u32 batt_soc;
  2155.     u8 cntr[2] = {0, 0};
  2156.     int rc = 0;
  2157.  
  2158.     mutex_lock(&chip->rw_lock);
  2159.     atomic_add_return(1, &chip->memif_user_cnt);
  2160.     mutex_unlock(&chip->rw_lock);
  2161.  
  2162.     /* Read Battery SOC */
  2163.     batt_soc = get_battery_soc_raw(chip);
  2164.  
  2165.     if (batt_soc > ESR_PULSE_RECONFIG_SOC) {
  2166.         if (fg_debug_mask & FG_POWER_SUPPLY)
  2167.             pr_info("Configuring soc registers batt_soc: %x\n",
  2168.                 batt_soc);
  2169.         batt_soc = ESR_PULSE_RECONFIG_SOC;
  2170.         rc = fg_mem_write(chip, (u8 *)&batt_soc, BATTERY_SOC_REG, 3,
  2171.                 BATTERY_SOC_OFFSET, 1);
  2172.         if (rc) {
  2173.             pr_err("failed to write BATT_SOC rc=%d\n", rc);
  2174.             goto out;
  2175.         }
  2176.  
  2177.         rc = fg_mem_write(chip, (u8 *)&batt_soc, SOC_FULL_REG, 3,
  2178.                 SOC_FULL_OFFSET, 1);
  2179.         if (rc) {
  2180.             pr_err("failed to write SOC_FULL rc=%d\n", rc);
  2181.             goto out;
  2182.         }
  2183.  
  2184.         rc = fg_mem_write(chip, cntr, COUNTER_IMPTR_REG, 2,
  2185.                 COUNTER_IMPTR_OFFSET, 1);
  2186.         if (rc) {
  2187.             pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
  2188.             goto out;
  2189.         }
  2190.  
  2191.         rc = fg_mem_write(chip, cntr, COUNTER_PULSE_REG, 2,
  2192.                 COUNTER_PULSE_OFFSET, 0);
  2193.         if (rc)
  2194.             pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
  2195.     }
  2196. out:
  2197.     fg_release_access_if_necessary(chip);
  2198.     return rc;
  2199. }
  2200.  
  2201. #define VBATT_LOW_STS_BIT BIT(2)
  2202. static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
  2203. {
  2204.     int rc = 0;
  2205.     u8 fg_batt_sts;
  2206.  
  2207.     rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
  2208.     if (rc)
  2209.         pr_err("spmi read failed: addr=%03X, rc=%d\n",
  2210.                 INT_RT_STS(chip->batt_base), rc);
  2211.     else
  2212.         *vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
  2213.  
  2214.     return rc;
  2215. }
  2216.  
  2217. #define SOC_EMPTY   BIT(3)
  2218. static bool fg_is_batt_empty(struct fg_chip *chip)
  2219. {
  2220.     u8 fg_soc_sts;
  2221.     int rc;
  2222.     bool vbatt_low_sts;
  2223.  
  2224.     if (chip->use_vbat_low_empty_soc) {
  2225.         if (fg_get_vbatt_status(chip, &vbatt_low_sts))
  2226.             return false;
  2227.  
  2228.         return vbatt_low_sts;
  2229.     }
  2230.  
  2231.     rc = fg_read(chip, &fg_soc_sts, INT_RT_STS(chip->soc_base), 1);
  2232.     if (rc) {
  2233.         pr_err("spmi read failed: addr=%03X, rc=%d\n",
  2234.                 INT_RT_STS(chip->soc_base), rc);
  2235.         return false;
  2236.     }
  2237.  
  2238.     return (fg_soc_sts & SOC_EMPTY) != 0;
  2239. }
  2240.  
  2241. static int get_monotonic_soc_raw(struct fg_chip *chip)
  2242. {
  2243.     u8 cap[2];
  2244.     int rc, tries = 0;
  2245.  
  2246.     while (tries < MAX_TRIES_SOC) {
  2247.         rc = fg_read(chip, cap,
  2248.                 chip->soc_base + SOC_MONOTONIC_SOC, 2);
  2249.         if (rc) {
  2250.             pr_err("spmi read failed: addr=%03x, rc=%d\n",
  2251.                 chip->soc_base + SOC_MONOTONIC_SOC, rc);
  2252.             return rc;
  2253.         }
  2254.  
  2255.         if (cap[0] == cap[1])
  2256.             break;
  2257.  
  2258.         tries++;
  2259.     }
  2260.  
  2261.     if (tries == MAX_TRIES_SOC) {
  2262.         pr_err("shadow registers do not match\n");
  2263.         return -EINVAL;
  2264.     }
  2265.  
  2266.     if (fg_debug_mask & FG_POWER_SUPPLY)
  2267.         pr_info_ratelimited("raw: 0x%02x\n", cap[0]);
  2268.     return cap[0];
  2269. }
  2270.  
#define EMPTY_CAPACITY      0
#define DEFAULT_CAPACITY    -1  /* reported while no battery profile is loaded */
#define MISSING_CAPACITY    100
#define FULL_CAPACITY       100
#define FULL_SOC_RAW        0xFF
/*
 * get_prop_capacity() - translate FG state into the 0..100 capacity
 * reported to the power-supply framework.
 *
 * Raw monotonic SOC 1..254 is mapped onto 1..99 so that the endpoints
 * are reserved for genuine empty/full. Various chip flags short-circuit
 * the raw reading (battery missing, profile not loaded, charge-full
 * latch, empty latch, cached last_soc).
 */
static int get_prop_capacity(struct fg_chip *chip)
{
    int msoc, rc;
    bool vbatt_low_sts;

    /*
     * use_last_soc: report from the cached raw SOC instead of hardware
     * (presumably set while the FG is being reset/recovered — confirm
     * against the setters elsewhere in this file).
     */
    if (chip->use_last_soc && chip->last_soc) {
        if (chip->last_soc == FULL_SOC_RAW)
            return FULL_CAPACITY;
        return DIV_ROUND_CLOSEST((chip->last_soc - 1) *
                (FULL_CAPACITY - 2),
                FULL_SOC_RAW - 2) + 1;
    }

    if (chip->battery_missing)
        return MISSING_CAPACITY;

    if (!chip->profile_loaded && !chip->use_otp_profile)
        return DEFAULT_CAPACITY;

    if (chip->charge_full)
        return FULL_CAPACITY;

    if (chip->soc_empty) {
        if (fg_debug_mask & FG_POWER_SUPPLY)
            pr_info_ratelimited("capacity: %d, EMPTY\n",
                    EMPTY_CAPACITY);
        return EMPTY_CAPACITY;
    }

    msoc = get_monotonic_soc_raw(chip);
    if (msoc == 0) {
        /*
         * A raw SOC of 0 can be a genuine empty battery or an FG
         * lockup; when VBATT_LOW is trusted, only report empty if
         * the comparator actually tripped, otherwise fall back to
         * the cached last_soc.
         */
        if (fg_reset_on_lockup && chip->use_vbat_low_empty_soc) {
            rc = fg_get_vbatt_status(chip, &vbatt_low_sts);
            if (rc) {
                pr_err("Error in reading vbatt_status, rc=%d\n",
                    rc);
                return EMPTY_CAPACITY;
            }

            if (!vbatt_low_sts)
                return DIV_ROUND_CLOSEST((chip->last_soc - 1) *
                        (FULL_CAPACITY - 2),
                        FULL_SOC_RAW - 2) + 1;
            else
                return EMPTY_CAPACITY;
        } else {
            return EMPTY_CAPACITY;
        }
    } else if (msoc == FULL_SOC_RAW) {
        return FULL_CAPACITY;
    }

    /* Linear map of raw 1..254 onto reported 1..99. */
    return DIV_ROUND_CLOSEST((msoc - 1) * (FULL_CAPACITY - 2),
            FULL_SOC_RAW - 2) + 1;
}
  2331.  
#define HIGH_BIAS   3
#define MED_BIAS    BIT(1)
#define LOW_BIAS    BIT(0)
/*
 * Battery-id bias currents in uA, indexed by the 2-bit bias selection
 * (LOW_BIAS = 1, MED_BIAS = 2, HIGH_BIAS = 3); index 0 is invalid and
 * rejected by get_batt_id().
 */
static u8 bias_ua[] = {
    [HIGH_BIAS] = 150,
    [MED_BIAS] = 15,
    [LOW_BIAS] = 5,
};
  2340.  
  2341. static int64_t get_batt_id(unsigned int battery_id_uv, u8 bid_info)
  2342. {
  2343.     u64 battery_id_ohm;
  2344.  
  2345.     if ((bid_info & 0x3) == 0) {
  2346.         pr_err("can't determine battery id 0x%02x\n", bid_info);
  2347.         return -EINVAL;
  2348.     }
  2349.  
  2350.     battery_id_ohm = div_u64(battery_id_uv, bias_ua[bid_info & 0x3]);
  2351.  
  2352.     return battery_id_ohm;
  2353. }
  2354.  
  2355. #define DEFAULT_TEMP_DEGC   250
  2356. static int get_sram_prop_now(struct fg_chip *chip, unsigned int type)
  2357. {
  2358.     if (fg_debug_mask & FG_POWER_SUPPLY)
  2359.         pr_info("addr 0x%02X, offset %d value %d\n",
  2360.             fg_data[type].address, fg_data[type].offset,
  2361.             fg_data[type].value);
  2362.  
  2363.     if (type == FG_DATA_BATT_ID)
  2364.         return get_batt_id(fg_data[type].value,
  2365.                 fg_data[FG_DATA_BATT_ID_INFO].value);
  2366.  
  2367.     if (type == FG_DATA_BATT_TEMP && chip->low_batt_temp_comp) {
  2368.         int cool = settings[FG_MEM_SOFT_COLD].value;
  2369.         int cold = settings[FG_MEM_HARD_COLD].value;
  2370.         int temp = fg_data[type].value;
  2371.  
  2372.         if (temp < 0)
  2373.             temp += (-50) * (cool - temp) / (cool - cold);
  2374.         return temp;
  2375.     }
  2376.  
  2377.     return fg_data[type].value;
  2378. }
  2379.  
  2380. #define MIN_TEMP_DEGC   -300
  2381. #define MAX_TEMP_DEGC   970
  2382. static int get_prop_jeita_temp(struct fg_chip *chip, unsigned int type)
  2383. {
  2384.     if (fg_debug_mask & FG_POWER_SUPPLY)
  2385.         pr_info("addr 0x%02X, offset %d\n", settings[type].address,
  2386.             settings[type].offset);
  2387.  
  2388.     return settings[type].value;
  2389. }
  2390.  
  2391. static int set_prop_jeita_temp(struct fg_chip *chip,
  2392.                 unsigned int type, int decidegc)
  2393. {
  2394.     int rc = 0;
  2395.  
  2396.     if (fg_debug_mask & FG_POWER_SUPPLY)
  2397.         pr_info("addr 0x%02X, offset %d temp%d\n",
  2398.             settings[type].address,
  2399.             settings[type].offset, decidegc);
  2400.  
  2401.     settings[type].value = decidegc;
  2402.  
  2403.     cancel_delayed_work_sync(
  2404.         &chip->update_jeita_setting);
  2405.     schedule_delayed_work(
  2406.         &chip->update_jeita_setting, 0);
  2407.  
  2408.     return rc;
  2409. }
  2410.  
  2411. #define EXTERNAL_SENSE_SELECT       0x4AC
  2412. #define EXTERNAL_SENSE_OFFSET       0x2
  2413. #define EXTERNAL_SENSE_BIT      BIT(2)
  2414. static int set_prop_sense_type(struct fg_chip *chip, int ext_sense_type)
  2415. {
  2416.     int rc;
  2417.  
  2418.     rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
  2419.             EXTERNAL_SENSE_BIT,
  2420.             ext_sense_type ? EXTERNAL_SENSE_BIT : 0,
  2421.             EXTERNAL_SENSE_OFFSET);
  2422.     if (rc) {
  2423.         pr_err("failed to write profile rc=%d\n", rc);
  2424.         return rc;
  2425.     }
  2426.  
  2427.     return 0;
  2428. }
  2429.  
  2430. #define IGNORE_FALSE_NEGATIVE_ISENSE_BIT    BIT(3)
  2431. static int set_prop_ignore_false_negative_isense(struct fg_chip *chip,
  2432.                             bool ignore)
  2433. {
  2434.     int rc;
  2435.  
  2436.     rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
  2437.             IGNORE_FALSE_NEGATIVE_ISENSE_BIT,
  2438.             ignore ? IGNORE_FALSE_NEGATIVE_ISENSE_BIT : 0,
  2439.             EXTERNAL_SENSE_OFFSET);
  2440.     if (rc) {
  2441.         pr_err("failed to %s isense false negative ignore rc=%d\n",
  2442.                 ignore ? "enable" : "disable", rc);
  2443.         return rc;
  2444.     }
  2445.  
  2446.     return 0;
  2447. }
  2448.  
  2449. #define EXPONENT_MASK       0xF800
  2450. #define MANTISSA_MASK       0x3FF
  2451. #define SIGN            BIT(10)
  2452. #define EXPONENT_SHIFT      11
  2453. #define MICRO_UNIT      1000000ULL
  2454. static int64_t float_decode(u16 reg)
  2455. {
  2456.     int64_t final_val, exponent_val, mantissa_val;
  2457.     int exponent, mantissa, n;
  2458.     bool sign;
  2459.  
  2460.     exponent = (reg & EXPONENT_MASK) >> EXPONENT_SHIFT;
  2461.     mantissa = (reg & MANTISSA_MASK);
  2462.     sign = !!(reg & SIGN);
  2463.  
  2464.     pr_debug("exponent=%d mantissa=%d sign=%d\n", exponent, mantissa, sign);
  2465.  
  2466.     mantissa_val = mantissa * MICRO_UNIT;
  2467.  
  2468.     n = exponent - 15;
  2469.     if (n < 0)
  2470.         exponent_val = MICRO_UNIT >> -n;
  2471.     else
  2472.         exponent_val = MICRO_UNIT << n;
  2473.  
  2474.     n = n - 10;
  2475.     if (n < 0)
  2476.         mantissa_val >>= -n;
  2477.     else
  2478.         mantissa_val <<= n;
  2479.  
  2480.     final_val = exponent_val + mantissa_val;
  2481.  
  2482.     if (sign)
  2483.         final_val *= -1;
  2484.  
  2485.     return final_val;
  2486. }
  2487.  
  2488. #define MIN_HALFFLOAT_EXP_N     -15
  2489. #define MAX_HALFFLOAT_EXP_N      16
  2490. static int log2_floor(int64_t uval)
  2491. {
  2492.     int n = 0;
  2493.     int64_t i = MICRO_UNIT;
  2494.  
  2495.     if (uval > i) {
  2496.         while (uval > i && n > MIN_HALFFLOAT_EXP_N) {
  2497.             i <<= 1;
  2498.             n += 1;
  2499.         }
  2500.         if (uval < i)
  2501.             n -= 1;
  2502.     } else if (uval < i) {
  2503.         while (uval < i && n < MAX_HALFFLOAT_EXP_N) {
  2504.             i >>= 1;
  2505.             n -= 1;
  2506.         }
  2507.     }
  2508.  
  2509.     return n;
  2510. }
  2511.  
  2512. static int64_t exp2_int(int64_t n)
  2513. {
  2514.     int p = n - 1;
  2515.  
  2516.     if (p > 0)
  2517.         return (2 * MICRO_UNIT) << p;
  2518.     else
  2519.         return (2 * MICRO_UNIT) >> abs(p);
  2520. }
  2521.  
/*
 * float_encode() - encode a micro-unit scaled signed integer into the
 * FG's 16-bit half-float format (inverse of float_decode()).
 *
 * Layout: bits [15:11] biased exponent (bias 15), bit [10] sign,
 * bits [9:0] mantissa.
 */
static u16 float_encode(int64_t uval)
{
    int sign = 0, n, exp, mantissa;
    u16 half = 0;

    /* Work on the magnitude; the sign is packed separately below. */
    if (uval < 0) {
        sign = 1;
        uval = abs(uval);
    }
    n = log2_floor(uval);
    exp = n + 15;
    /*
     * Scale the remainder above 2^n into 10 fractional mantissa bits,
     * rounding to nearest (the "+ MICRO_UNIT / 2" before the final
     * division).
     */
    mantissa = div_s64(div_s64((uval - exp2_int(n)) * exp2_int(10 - n),
                MICRO_UNIT) + MICRO_UNIT / 2, MICRO_UNIT);

    half = (mantissa & MANTISSA_MASK) | ((sign << 10) & SIGN)
        | ((exp << 11) & EXPONENT_MASK);

    if (fg_debug_mask & FG_STATUS)
        pr_info("uval = %lld, m = 0x%02x, sign = 0x%02x, exp = 0x%02x, half = 0x%04x\n",
                uval, mantissa, sign, exp, half);
    return half;
}
  2544.  
  2545. #define BATT_IDED   BIT(3)
  2546. static int fg_is_batt_id_valid(struct fg_chip *chip)
  2547. {
  2548.     u8 fg_batt_sts;
  2549.     int rc;
  2550.  
  2551.     rc = fg_read(chip, &fg_batt_sts,
  2552.                  INT_RT_STS(chip->batt_base), 1);
  2553.     if (rc) {
  2554.         pr_err("spmi read failed: addr=%03X, rc=%d\n",
  2555.                 INT_RT_STS(chip->batt_base), rc);
  2556.         return rc;
  2557.     }
  2558.  
  2559.     pr_debug("fg batt sts 0x%x\n", fg_batt_sts);
  2560.  
  2561.     return (fg_batt_sts & BATT_IDED) ? 1 : 0;
  2562. }
  2563.  
  2564. static int64_t twos_compliment_extend(int64_t val, int nbytes)
  2565. {
  2566.     int i;
  2567.     int64_t mask;
  2568.  
  2569.     mask = 0x80LL << ((nbytes - 1) * 8);
  2570.     if (val & mask) {
  2571.         for (i = 8; i > nbytes; i--) {
  2572.             mask = 0xFFLL << ((i - 1) * 8);
  2573.             val |= mask;
  2574.         }
  2575.     }
  2576.  
  2577.     return val;
  2578. }
  2579.  
  2580. #define SLOPE_LIMITER_COEFF_REG     0x430
  2581. #define SLOPE_LIMITER_COEFF_OFFSET  3
  2582. #define SLOPE_LIMITER_COEFF_LEN     2
  2583. #define SLOPE_LIMITER_COEFF_DEFAULT 0xA1A0
  2584. #define LSB_24B_NUMRTR      596046
  2585. #define LSB_24B_DENMTR      1000000
  2586. #define LSB_16B_NUMRTR      152587
  2587. #define LSB_16B_DENMTR      1000
  2588. #define LSB_8B      9800
  2589. #define TEMP_LSB_16B    625
  2590. #define DECIKELVIN  2730
  2591. #define SRAM_PERIOD_NO_ID_UPDATE_MS 100
  2592. #define FULL_PERCENT_28BIT      0xFFFFFFF
  2593. static int update_sram_data(struct fg_chip *chip, int *resched_ms)
  2594. {
  2595.     int i, j, rc = 0;
  2596.     u8 reg[4];
  2597.     int64_t temp;
  2598.     int battid_valid = fg_is_batt_id_valid(chip);
  2599.  
  2600.     fg_stay_awake(&chip->update_sram_wakeup_source);
  2601.     if (chip->fg_restarting)
  2602.         goto resched;
  2603.  
  2604.     fg_mem_lock(chip);
  2605.     for (i = 1; i < FG_DATA_MAX; i++) {
  2606.         if (chip->profile_loaded && i >= FG_DATA_BATT_ID)
  2607.             continue;
  2608.         rc = fg_mem_read(chip, reg, fg_data[i].address,
  2609.             fg_data[i].len, fg_data[i].offset, 0);
  2610.         if (rc) {
  2611.             pr_err("Failed to update sram data\n");
  2612.             break;
  2613.         }
  2614.  
  2615.         temp = 0;
  2616.         for (j = 0; j < fg_data[i].len; j++)
  2617.             temp |= reg[j] << (8 * j);
  2618.  
  2619.         switch (i) {
  2620.         case FG_DATA_OCV:
  2621.         case FG_DATA_VOLTAGE:
  2622.         case FG_DATA_CPRED_VOLTAGE:
  2623.             fg_data[i].value = div_u64(
  2624.                     (u64)(u16)temp * LSB_16B_NUMRTR,
  2625.                     LSB_16B_DENMTR);
  2626.             break;
  2627.         case FG_DATA_CURRENT:
  2628.             temp = twos_compliment_extend(temp, fg_data[i].len);
  2629.             fg_data[i].value = div_s64(
  2630.                     (s64)temp * LSB_16B_NUMRTR,
  2631.                     LSB_16B_DENMTR);
  2632.             break;
  2633.         case FG_DATA_BATT_ESR:
  2634.             fg_data[i].value = float_decode((u16) temp);
  2635.             break;
  2636.         case FG_DATA_BATT_ESR_COUNT:
  2637.             fg_data[i].value = (u16)temp;
  2638.             break;
  2639.         case FG_DATA_BATT_ID:
  2640.             if (battid_valid)
  2641.                 fg_data[i].value = reg[0] * LSB_8B;
  2642.             break;
  2643.         case FG_DATA_BATT_ID_INFO:
  2644.             if (battid_valid)
  2645.                 fg_data[i].value = reg[0];
  2646.             break;
  2647.         case FG_DATA_BATT_SOC:
  2648.             fg_data[i].value = div64_s64((temp * 10000),
  2649.                             FULL_PERCENT_3B);
  2650.             break;
  2651.         case FG_DATA_CC_CHARGE:
  2652.             temp = twos_compliment_extend(temp, fg_data[i].len);
  2653.             fg_data[i].value = div64_s64(
  2654.                     temp * (int64_t)chip->nom_cap_uah,
  2655.                     FULL_PERCENT_28BIT);
  2656.             break;
  2657.         case FG_DATA_VINT_ERR:
  2658.             temp = twos_compliment_extend(temp, fg_data[i].len);
  2659.             fg_data[i].value = div64_s64(temp * chip->nom_cap_uah,
  2660.                     FULL_PERCENT_3B);
  2661.             break;
  2662.         };
  2663.  
  2664.         if (fg_debug_mask & FG_MEM_DEBUG_READS)
  2665.             pr_info("%d %lld %d\n", i, temp, fg_data[i].value);
  2666.     }
  2667.  
  2668.     if (!chip->soc_slope_limiter_en) {
  2669.         rc = fg_mem_read(chip, reg, SLOPE_LIMITER_COEFF_REG,
  2670.                  SLOPE_LIMITER_COEFF_LEN,
  2671.                  SLOPE_LIMITER_COEFF_OFFSET, 0);
  2672.         if (rc)
  2673.             pr_err("Failed to check Slope Limit\n");
  2674.         temp = 0;
  2675.         for (j = 0; j < SLOPE_LIMITER_COEFF_LEN; j++)
  2676.             temp |= reg[j] << (8 * j);
  2677.         if (temp != SLOPE_LIMITER_COEFF_DEFAULT) {
  2678.             pr_err("Slope Limit Broken data=0x%X! Resetting FG\n",
  2679.                    (int)temp);
  2680.             fg_check_ima_error_handling(chip);
  2681.             goto out;
  2682.         }
  2683.     }
  2684.  
  2685.     fg_mem_release(chip);
  2686.  
  2687.     /* Backup the registers whenever no error happens during update */
  2688.     if (fg_reset_on_lockup && !chip->ima_error_handling) {
  2689.         if (!rc) {
  2690.             if (fg_debug_mask & FG_STATUS)
  2691.                 pr_info("backing up SRAM registers\n");
  2692.             rc = fg_backup_sram_registers(chip, true);
  2693.             if (rc) {
  2694.                 pr_err("Couldn't save sram registers\n");
  2695.                 goto out;
  2696.             }
  2697.             if (!chip->use_last_soc) {
  2698.                 chip->last_soc = get_monotonic_soc_raw(chip);
  2699.                 chip->last_cc_soc = div64_s64(
  2700.                     (int64_t)chip->last_soc *
  2701.                     FULL_PERCENT_28BIT, FULL_SOC_RAW);
  2702.             }
  2703.             if (fg_debug_mask & FG_STATUS)
  2704.                 pr_info("last_soc: %d last_cc_soc: %lld\n",
  2705.                     chip->last_soc, chip->last_cc_soc);
  2706.         } else {
  2707.             pr_err("update_sram failed\n");
  2708.             goto out;
  2709.         }
  2710.     }
  2711.  
  2712.     if (!rc)
  2713.         get_current_time(&chip->last_sram_update_time);
  2714.  
  2715. resched:
  2716.     if (battid_valid) {
  2717.         complete_all(&chip->batt_id_avail);
  2718.         *resched_ms = fg_sram_update_period_ms;
  2719.     } else {
  2720.         *resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
  2721.     }
  2722. out:
  2723.     fg_relax(&chip->update_sram_wakeup_source);
  2724.     return rc;
  2725. }
  2726.  
  2727. // Read the beat count and write it into the beat_count arg;
  2728. // return non-zero on failure.
  2729. static int read_beat(struct fg_chip *chip, u8 *beat_count)
  2730. {
  2731.     int rc = fg_read(chip, beat_count,
  2732.              chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
  2733.     if (rc)
  2734.         pr_err("failed to read beat count rc=%d\n", rc);
  2735.     else if (fg_debug_mask & FG_STATUS)
  2736.         pr_info("current: %d, prev: %d\n", *beat_count,
  2737.             chip->last_beat_count);
  2738.     return rc;
  2739. }
  2740. /*
  2741.  * The FG_ALG_SYSCTL_1 word contains control bits for the
  2742.  * fuel-gauge algorithm, most of whose effect are not
  2743.  * publicly disclosed. The low nibble of 0x4B3 contains
  2744.  * bits to control whether an IRQ is raised on low-battery
  2745.  * conditions, as well as a debug bit (bit 3) that forces
  2746.  * a delta-soc interrupt on every fuel-gauge cycle.
  2747.  * The value in FG_ALG_SYSCTL_1_DFLT is the recommended
  2748.  * default configuration with the IRQ's enabled.
  2749.  */
  2750. #define FG_ALG_SYSCTL_1     0x4B0
  2751. #define FG_ALG_SYSCTL_1_DFLT    0x870C7999
  2752. static int fg_check_system_config(struct fg_chip *chip)
  2753. {
  2754.     int rc;
  2755.     u32 buf;
  2756.     if (!chip->ima_supported)
  2757.         return 0;
  2758.     rc = fg_mem_read(chip, (u8 *)&buf, FG_ALG_SYSCTL_1, 4, 0, 0);
  2759.     if (rc) {
  2760.         pr_err("Failed to read 0x4B0-3 rc=%d\n", rc);
  2761.         return rc;
  2762.     }
  2763.     if (fg_debug_mask & FG_STATUS)
  2764.         pr_info("FG_ALG_SYSCTL_1: %x\n", buf);
  2765.     if (buf != FG_ALG_SYSCTL_1_DFLT) {
  2766.         pr_err("FG_ALG_SYSCTL_1 corrupted? buf: %x\n", buf);
  2767.         buf = FG_ALG_SYSCTL_1_DFLT;
  2768.         rc = fg_mem_write(chip, (u8 *)&buf, FG_ALG_SYSCTL_1, 4, 0, 0);
  2769.         if (rc) {
  2770.             pr_err("Failed to write 0x4B0-3 rc=%d\n", rc);
  2771.             return rc;
  2772.         }
  2773.     }
  2774.     return rc;
  2775. }
  2776.  
  2777. #define SANITY_CHECK_PERIOD_MS  5000
  2778. static void check_sanity_work(struct work_struct *work)
  2779. {
  2780.     struct fg_chip *chip = container_of(work,
  2781.                 struct fg_chip,
  2782.                 check_sanity_work.work);
  2783.     int rc = 0;
  2784.     u8 beat_count;
  2785.     bool tried_once = false;
  2786.  
  2787.     rc = fg_check_system_config(chip);
  2788.     if (rc)
  2789.         pr_err("Failed to check system config rc=%d\n", rc);
  2790.  
  2791.     // Try one beat check once up-front to avoid the common
  2792.     // case where the beat has changed and we don't need to hold
  2793.     // the chip awake.
  2794.     rc = read_beat(chip, &beat_count);
  2795.     if (rc == 0 && chip->last_beat_count != beat_count) {
  2796.         chip->last_beat_count = beat_count;
  2797.         schedule_delayed_work(
  2798.             &chip->check_sanity_work,
  2799.             msecs_to_jiffies(SANITY_CHECK_PERIOD_MS));
  2800.         return;
  2801.     }
  2802.  
  2803.     fg_stay_awake(&chip->sanity_wakeup_source);
  2804.  
  2805. try_again:
  2806.     rc = fg_read(chip, &beat_count,
  2807.             chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
  2808.     if (rc) {
  2809.         pr_err("failed to read beat count rc=%d\n", rc);
  2810.         goto resched;
  2811.     }
  2812.  
  2813.     if (fg_debug_mask & FG_STATUS)
  2814.         pr_info("current: %d, prev: %d\n", beat_count,
  2815.             chip->last_beat_count);
  2816.  
  2817.     if (chip->last_beat_count == beat_count) {
  2818.         if (!tried_once) {
  2819.             /* Wait for 1 FG cycle and read it once again */
  2820.             msleep(1500);
  2821.             tried_once = true;
  2822.             goto try_again;
  2823.         } else {
  2824.             pr_err("Beat count not updating\n");
  2825.             fg_check_ima_error_handling(chip);
  2826.             goto out;
  2827.         }
  2828.     } else {
  2829.         chip->last_beat_count = beat_count;
  2830.     }
  2831. resched:
  2832.     schedule_delayed_work(
  2833.         &chip->check_sanity_work,
  2834.         msecs_to_jiffies(SANITY_CHECK_PERIOD_MS));
  2835. out:
  2836.     fg_relax(&chip->sanity_wakeup_source);
  2837. }
  2838.  
  2839. /*
  2840.  *function for read real-time vbat and ibat from register
  2841.  */
  2842. static int get_real_time_prop_value(struct fg_chip *chip, unsigned int type)
  2843. {
  2844.     int ret = -1;
  2845.     cancel_delayed_work(&chip->update_sram_data);
  2846.     reinit_completion(&chip->fg_sram_updating_done);
  2847.     schedule_delayed_work(&chip->update_sram_data,
  2848.         msecs_to_jiffies(0));
  2849.     /*make sure we got the latest updated data. and make sure never hold the process too long.*/
  2850.     ret = wait_for_completion_timeout(//never interruptable
  2851.         &chip->fg_sram_updating_done,
  2852.         msecs_to_jiffies(10));
  2853.     return fg_data[type].value;
  2854. }
  2855.  
  2856. #define SRAM_TIMEOUT_MS         3000
/*
 * Delayed worker that refreshes all SRAM-backed fg_data entries.
 * Waits (up to SRAM_TIMEOUT_MS, retried once on -ERESTARTSYS) for
 * MEMIF access to be revoked, runs update_sram_data(), signals
 * fg_sram_updating_done for get_real_time_prop_value(), and then
 * reschedules itself unless update_sram_data() failed.
 *
 * NOTE(review): on the timeout path the completion is never signaled,
 * so a concurrent get_real_time_prop_value() will burn its full 10 ms
 * wait and return stale data — presumed intentional; confirm.
 */
static void update_sram_data_work(struct work_struct *work)
{
    struct fg_chip *chip = container_of(work,
                struct fg_chip,
                update_sram_data.work);
    int resched_ms, ret;
    bool tried_again = false;
    int rc = 0;

wait:
    /* Wait for MEMIF access revoked */
    ret = wait_for_completion_interruptible_timeout(
            &chip->sram_access_revoked,
            msecs_to_jiffies(SRAM_TIMEOUT_MS));

    /* If we were interrupted wait again one more time. */
    if (ret == -ERESTARTSYS && !tried_again) {
        tried_again = true;
        goto wait;
    } else if (ret <= 0) {
        /* Timed out: skip this refresh, keep the periodic cadence */
        pr_err("transaction timed out ret=%d\n", ret);
        if (fg_is_batt_id_valid(chip))
            resched_ms = fg_sram_update_period_ms;
        else
            resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
        goto out;
    }
    rc = update_sram_data(chip, &resched_ms);

    /* Tell the real-time property reader that fresh data landed */
    complete(&chip->fg_sram_updating_done);
out:
    /* A non-zero rc (update failed) deliberately stops rescheduling */
    if (!rc)
        schedule_delayed_work(
            &chip->update_sram_data,
            msecs_to_jiffies(resched_ms));
}
  2893.  
  2894. #define BATT_TEMP_OFFSET    3
  2895. #define BATT_TEMP_CNTRL_MASK    0x17
  2896. #define DISABLE_THERM_BIT   BIT(0)
  2897. #define TEMP_SENSE_ALWAYS_BIT   BIT(1)
  2898. #define TEMP_SENSE_CHARGE_BIT   BIT(2)
  2899. #define FORCE_RBIAS_ON_BIT  BIT(4)
  2900. #define BATT_TEMP_OFF       DISABLE_THERM_BIT
  2901. #define BATT_TEMP_ON        (FORCE_RBIAS_ON_BIT | TEMP_SENSE_ALWAYS_BIT | \
  2902.                 TEMP_SENSE_CHARGE_BIT)
  2903. #define TEMP_PERIOD_UPDATE_MS       10000
  2904. #define TEMP_PERIOD_TIMEOUT_MS      3000
  2905. #define BATT_TEMP_LOW_LIMIT     -600
  2906. #define BATT_TEMP_HIGH_LIMIT        1500
/*
 * Periodic worker (every TEMP_PERIOD_UPDATE_MS) that reads the battery
 * temperature from FG SRAM into fg_data[0].  When sw_rbias_ctrl is set,
 * the thermistor bias is forced on around the read and restored off
 * afterwards.  Readings outside the configured limits are replaced by
 * the last good value and may trigger an FG reset when lockup
 * detection (fg_reset_on_lockup) is enabled.
 */
static void update_temp_data(struct work_struct *work)
{
    s16 temp;
    u8 reg[2];
    bool tried_again = false;
    int rc, ret, timeout = TEMP_PERIOD_TIMEOUT_MS;
    struct fg_chip *chip = container_of(work,
                struct fg_chip,
                update_temp_work.work);

    /* Don't touch the hardware mid-restart; just try again later */
    if (chip->fg_restarting)
        goto resched;

    fg_stay_awake(&chip->update_temp_wakeup_source);
    if (chip->sw_rbias_ctrl) {
        /* Force the thermistor bias on for the duration of the read */
        rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
                BATT_TEMP_CNTRL_MASK,
                BATT_TEMP_ON,
                BATT_TEMP_OFFSET);
        if (rc) {
            pr_err("failed to write BATT_TEMP_ON rc=%d\n", rc);
            goto out;
        }

wait:
        /* Wait for MEMIF access revoked */
        ret = wait_for_completion_interruptible_timeout(
                &chip->sram_access_revoked,
                msecs_to_jiffies(timeout));

        /* If we were interrupted wait again one more time. */
        if (ret == -ERESTARTSYS && !tried_again) {
            tried_again = true;
            goto wait;
        } else if (ret <= 0) {
            rc = -ETIMEDOUT;
            pr_err("transaction timed out ret=%d\n", ret);
            goto out;
        }
    }

    /* Read FG_DATA_BATT_TEMP now */
    rc = fg_mem_read(chip, reg, fg_data[0].address,
        fg_data[0].len, fg_data[0].offset,
        chip->sw_rbias_ctrl ? 1 : 0);
    if (rc) {
        pr_err("Failed to update temp data\n");
        goto out;
    }

    /* LSB-first 16-bit raw value, converted to decidegrees Celsius */
    temp = reg[0] | (reg[1] << 8);
    temp = (temp * TEMP_LSB_16B / 1000) - DECIKELVIN;

    /*
     * If temperature is within the specified range (e.g. -60C and 150C),
     * update it to the userspace. Otherwise, use the last read good
     * temperature.
     */
    if (temp > chip->batt_temp_low_limit &&
            temp < chip->batt_temp_high_limit) {
        chip->last_good_temp = temp;
        fg_data[0].value = temp;
    } else {
        fg_data[0].value = chip->last_good_temp;

        /*
         * If the temperature is read before and seems to be in valid
         * range, then a bad temperature reading could be because of
         * FG lockup. Trigger the FG reset sequence in such cases.
         */
        if (chip->last_temp_update_time && fg_reset_on_lockup &&
            (chip->last_good_temp > chip->batt_temp_low_limit &&
            chip->last_good_temp < chip->batt_temp_high_limit)) {
            pr_err("Batt_temp is %d !, triggering FG reset\n",
                temp);
            fg_check_ima_error_handling(chip);
        }
    }

    if (fg_debug_mask & FG_MEM_DEBUG_READS)
        pr_info("BATT_TEMP %d %d\n", temp, fg_data[0].value);

    get_current_time(&chip->last_temp_update_time);

    /* New temperature may move us across the slope-limit threshold */
    if (chip->soc_slope_limiter_en) {
        fg_stay_awake(&chip->slope_limit_wakeup_source);
        schedule_work(&chip->slope_limiter_work);
    }

out:
    /* Restore the bias to its off state regardless of read outcome */
    if (chip->sw_rbias_ctrl) {
        rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
                BATT_TEMP_CNTRL_MASK,
                BATT_TEMP_OFF,
                BATT_TEMP_OFFSET);
        if (rc)
            pr_err("failed to write BATT_TEMP_OFF rc=%d\n", rc);
    }
    fg_relax(&chip->update_temp_wakeup_source);

resched:
    schedule_delayed_work(
        &chip->update_temp_work,
        msecs_to_jiffies(TEMP_PERIOD_UPDATE_MS));
}
  3012.  
  3013. static void update_jeita_setting(struct work_struct *work)
  3014. {
  3015.     struct fg_chip *chip = container_of(work,
  3016.                 struct fg_chip,
  3017.                 update_jeita_setting.work);
  3018.     u8 reg[4];
  3019.     int i, rc;
  3020.  
  3021.     for (i = 0; i < 4; i++)
  3022.         reg[i] = (settings[FG_MEM_SOFT_COLD + i].value / 10) + 30;
  3023.  
  3024.     rc = fg_mem_write(chip, reg, settings[FG_MEM_SOFT_COLD].address,
  3025.             4, settings[FG_MEM_SOFT_COLD].offset, 0);
  3026.     if (rc)
  3027.         pr_err("failed to update JEITA setting rc=%d\n", rc);
  3028. }
  3029.  
  3030. static int fg_set_resume_soc(struct fg_chip *chip, u8 threshold)
  3031. {
  3032.     u16 address;
  3033.     int offset, rc;
  3034.  
  3035.     address = settings[FG_MEM_RESUME_SOC].address;
  3036.     offset = settings[FG_MEM_RESUME_SOC].offset;
  3037.  
  3038.     rc = fg_mem_masked_write(chip, address, 0xFF, threshold, offset);
  3039.  
  3040.     if (rc)
  3041.         pr_err("write failed rc=%d\n", rc);
  3042.     else
  3043.         pr_debug("setting resume-soc to %x\n", threshold);
  3044.  
  3045.     return rc;
  3046. }
  3047.  
  3048. #define BATT_CYCLE_NUMBER_REG           0x5E8
  3049. #define BATT_CYCLE_OFFSET               0
  3050.  
  3051. static ssize_t fg_get_cycle_counts_bins(struct device *dev,
  3052.                     struct device_attribute *attr,
  3053.                     char *buf)
  3054. {
  3055.     struct fg_chip *chip = dev_get_drvdata(dev);
  3056.     int rc = 0, i, address;
  3057.     u8 data[2];
  3058.     int length = 0;
  3059.  
  3060.     fg_mem_lock(chip);
  3061.     for (i = 0; i < BUCKET_COUNT; i++) {
  3062.         address = BATT_CYCLE_NUMBER_REG + i * 2;
  3063.         rc = fg_mem_read(chip, (u8 *)&data, address, 2,
  3064.                  BATT_CYCLE_OFFSET, 0);
  3065.         if (rc) {
  3066.             pr_err("failed to read bucket %d rc=%d\n", i, rc);
  3067.             chip->cyc_ctr.count[i] = 0;
  3068.         } else
  3069.             chip->cyc_ctr.count[i] = data[0] | data[1] << 8;
  3070.  
  3071.         length += scnprintf(buf + length,
  3072.                     PAGE_SIZE - length, "%d",
  3073.                     chip->cyc_ctr.count[i]);
  3074.  
  3075.         if (i == BUCKET_COUNT-1)
  3076.             length += scnprintf(buf + length,
  3077.                         PAGE_SIZE - length, "\n");
  3078.         else
  3079.             length += scnprintf(buf + length,
  3080.                         PAGE_SIZE - length, " ");
  3081.     }
  3082.     fg_mem_release(chip);
  3083.     return length;
  3084. }
  3085.  
  3086. static ssize_t fg_set_cycle_counts_bins(struct device *dev,
  3087.                     struct device_attribute *attr,
  3088.                     const char *buf, size_t count)
  3089. {
  3090.     struct fg_chip *chip = dev_get_drvdata(dev);
  3091.     int rc = 0, strval[BUCKET_COUNT], bucket, address;
  3092.     u16 cyc_count;
  3093.     u8 data[2];
  3094.  
  3095.     if (sscanf(buf, "%d %d %d %d %d %d %d %d",
  3096.            &strval[0], &strval[1], &strval[2], &strval[3],
  3097.            &strval[4], &strval[5], &strval[6], &strval[7])
  3098.         != BUCKET_COUNT)
  3099.         return -EINVAL;
  3100.     fg_mem_lock(chip);
  3101.     for (bucket = 0; bucket < BUCKET_COUNT; bucket++) {
  3102.         if (strval[bucket] > chip->cyc_ctr.count[bucket]) {
  3103.             cyc_count = strval[bucket];
  3104.             data[0] = cyc_count & 0xFF;
  3105.             data[1] = cyc_count >> 8;
  3106.  
  3107.             address = BATT_CYCLE_NUMBER_REG + bucket * 2;
  3108.  
  3109.             rc = fg_mem_write(chip, data, address, 2,
  3110.                       BATT_CYCLE_OFFSET, 0);
  3111.             if (rc) {
  3112.                 pr_err("failed to write BATT_CYCLE[%d] rc=%d\n",
  3113.                        bucket, rc);
  3114.                 fg_mem_release(chip);
  3115.                 return rc;
  3116.             }
  3117.             chip->cyc_ctr.count[bucket] = cyc_count;
  3118.         }
  3119.     }
  3120.     fg_mem_release(chip);
  3121.     return count;
  3122. }
  3123.  
/* sysfs attribute "cycle_counts_bins", owner/group read-write (0660) */
static DEVICE_ATTR(cycle_counts_bins, 0660,
           fg_get_cycle_counts_bins, fg_set_cycle_counts_bins);
  3126.  
  3127. static void restore_cycle_counter(struct fg_chip *chip)
  3128. {
  3129.     int rc = 0, i, address;
  3130.     u8 data[2];
  3131.  
  3132.     fg_mem_lock(chip);
  3133.     for (i = 0; i < BUCKET_COUNT; i++) {
  3134.         address = BATT_CYCLE_NUMBER_REG + i * 2;
  3135.         rc = fg_mem_read(chip, (u8 *)&data, address, 2,
  3136.                 BATT_CYCLE_OFFSET, 0);
  3137.         if (rc)
  3138.             pr_err("Failed to read BATT_CYCLE_NUMBER[%d] rc: %d\n",
  3139.                 i, rc);
  3140.         else
  3141.             chip->cyc_ctr.count[i] = data[0] | data[1] << 8;
  3142.     }
  3143.     fg_mem_release(chip);
  3144. }
  3145.  
  3146. static void clear_cycle_counter(struct fg_chip *chip)
  3147. {
  3148.     int rc = 0, len, i;
  3149.  
  3150.     if (!chip->cyc_ctr.en)
  3151.         return;
  3152.  
  3153.     len = sizeof(chip->cyc_ctr.count);
  3154.     memset(chip->cyc_ctr.count, 0, len);
  3155.     for (i = 0; i < BUCKET_COUNT; i++) {
  3156.         chip->cyc_ctr.started[i] = false;
  3157.         chip->cyc_ctr.last_soc[i] = 0;
  3158.     }
  3159.     rc = fg_mem_write(chip, (u8 *)&chip->cyc_ctr.count,
  3160.             BATT_CYCLE_NUMBER_REG, len,
  3161.             BATT_CYCLE_OFFSET, 0);
  3162.     if (rc)
  3163.         pr_err("failed to write BATT_CYCLE_NUMBER rc=%d\n", rc);
  3164. }
  3165.  
  3166. static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket)
  3167. {
  3168.     int rc = 0, address;
  3169.     u16 cyc_count;
  3170.     u8 data[2];
  3171.  
  3172.     if (bucket < 0 || (bucket > BUCKET_COUNT - 1))
  3173.         return 0;
  3174.  
  3175.     cyc_count = chip->cyc_ctr.count[bucket];
  3176.     cyc_count++;
  3177.     data[0] = cyc_count & 0xFF;
  3178.     data[1] = cyc_count >> 8;
  3179.  
  3180.     address = BATT_CYCLE_NUMBER_REG + bucket * 2;
  3181.  
  3182.     rc = fg_mem_write(chip, data, address, 2, BATT_CYCLE_OFFSET, 0);
  3183.     if (rc)
  3184.         pr_err("failed to write BATT_CYCLE_NUMBER[%d] rc=%d\n",
  3185.             bucket, rc);
  3186.     else
  3187.         chip->cyc_ctr.count[bucket] = cyc_count;
  3188.  
  3189.     if (fg_debug_mask & FG_POWER_SUPPLY)
  3190.         pr_info("Stored bucket %d cyc_count: %d\n", bucket, cyc_count);
  3191.     return rc;
  3192. }
  3193.  
  3194. static void update_cycle_count(struct work_struct *work)
  3195. {
  3196.     int rc = 0, bucket, i;
  3197.     u8 reg[3], batt_soc;
  3198.     struct fg_chip *chip = container_of(work,
  3199.                 struct fg_chip,
  3200.                 cycle_count_work);
  3201.  
  3202.     mutex_lock(&chip->cyc_ctr.lock);
  3203.     rc = fg_mem_read(chip, reg, BATTERY_SOC_REG, 3,
  3204.             BATTERY_SOC_OFFSET, 0);
  3205.     if (rc) {
  3206.         pr_err("Failed to read battery soc rc: %d\n", rc);
  3207.         goto out;
  3208.     }
  3209.     batt_soc = reg[2];
  3210.  
  3211.     if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
  3212.         /* Find out which bucket the SOC falls in */
  3213.         bucket = batt_soc / BUCKET_SOC_PCT;
  3214.  
  3215.         if (fg_debug_mask & FG_STATUS)
  3216.             pr_info("batt_soc: %x bucket: %d\n", reg[2], bucket);
  3217.  
  3218.         /*
  3219.          * If we've started counting for the previous bucket,
  3220.          * then store the counter for that bucket if the
  3221.          * counter for current bucket is getting started.
  3222.          */
  3223.         if (bucket > 0 && chip->cyc_ctr.started[bucket - 1] &&
  3224.             !chip->cyc_ctr.started[bucket]) {
  3225.             rc = fg_inc_store_cycle_ctr(chip, bucket - 1);
  3226.             if (rc) {
  3227.                 pr_err("Error in storing cycle_ctr rc: %d\n",
  3228.                     rc);
  3229.                 goto out;
  3230.             } else {
  3231.                 chip->cyc_ctr.started[bucket - 1] = false;
  3232.                 chip->cyc_ctr.last_soc[bucket - 1] = 0;
  3233.             }
  3234.         }
  3235.         if (!chip->cyc_ctr.started[bucket]) {
  3236.             chip->cyc_ctr.started[bucket] = true;
  3237.             chip->cyc_ctr.last_soc[bucket] = batt_soc;
  3238.         }
  3239.     } else {
  3240.         for (i = 0; i < BUCKET_COUNT; i++) {
  3241.             if (chip->cyc_ctr.started[i] &&
  3242.                 batt_soc > chip->cyc_ctr.last_soc[i]) {
  3243.                 rc = fg_inc_store_cycle_ctr(chip, i);
  3244.                 if (rc)
  3245.                     pr_err("Error in storing cycle_ctr rc: %d\n",
  3246.                         rc);
  3247.                 chip->cyc_ctr.last_soc[i] = 0;
  3248.             }
  3249.             chip->cyc_ctr.started[i] = false;
  3250.         }
  3251.     }
  3252. out:
  3253.     mutex_unlock(&chip->cyc_ctr.lock);
  3254. }
  3255.  
  3256. static int fg_get_cycle_count(struct fg_chip *chip)
  3257. {
  3258.     int count;
  3259.  
  3260.     if (!chip->cyc_ctr.en)
  3261.         return 0;
  3262.  
  3263.     if ((chip->cyc_ctr.id <= 0) || (chip->cyc_ctr.id > BUCKET_COUNT))
  3264.         return -EINVAL;
  3265.  
  3266.     mutex_lock(&chip->cyc_ctr.lock);
  3267.     count = chip->cyc_ctr.count[chip->cyc_ctr.id - 1];
  3268.     mutex_unlock(&chip->cyc_ctr.lock);
  3269.     return count;
  3270. }
  3271.  
  3272. static void half_float_to_buffer(int64_t uval, u8 *buffer)
  3273. {
  3274.     u16 raw;
  3275.  
  3276.     raw = float_encode(uval);
  3277.     buffer[0] = (u8)(raw & 0xFF);
  3278.     buffer[1] = (u8)((raw >> 8) & 0xFF);
  3279. }
  3280.  
  3281. static int64_t half_float(u8 *buffer)
  3282. {
  3283.     u16 val;
  3284.  
  3285.     val = buffer[1] << 8 | buffer[0];
  3286.     return float_decode(val);
  3287. }
  3288.  
  3289. static int voltage_2b(u8 *buffer)
  3290. {
  3291.     u16 val;
  3292.  
  3293.     val = buffer[1] << 8 | buffer[0];
  3294.     /* the range of voltage 2b is [-5V, 5V], so it will fit in an int */
  3295.     return (int)div_u64(((u64)val) * LSB_16B_NUMRTR, LSB_16B_DENMTR);
  3296. }
  3297.  
  3298. static int bcap_uah_2b(u8 *buffer)
  3299. {
  3300.     u16 val;
  3301.  
  3302.     val = buffer[1] << 8 | buffer[0];
  3303.     return ((int)val) * 1000;
  3304. }
  3305.  
  3306. #define SLOPE_LIMIT_TEMP_THRESHOLD  100
  3307. #define SLOPE_LIMIT_LOW_TEMP_CHG    45
  3308. #define SLOPE_LIMIT_HIGH_TEMP_CHG   2
  3309. #define SLOPE_LIMIT_LOW_TEMP_DISCHG 45
  3310. #define SLOPE_LIMIT_HIGH_TEMP_DISCHG    2
  3311. static void slope_limiter_work(struct work_struct *work)
  3312. {
  3313.     struct fg_chip *chip = container_of(work, struct fg_chip,
  3314.                 slope_limiter_work);
  3315.     enum slope_limit_status status;
  3316.     int batt_temp, rc;
  3317.     u8 buf[2];
  3318.     int64_t val;
  3319.  
  3320.     batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
  3321.  
  3322.     if (chip->status == POWER_SUPPLY_STATUS_CHARGING ||
  3323.             chip->status == POWER_SUPPLY_STATUS_FULL) {
  3324.         if (batt_temp < chip->slope_limit_temp)
  3325.             status = LOW_TEMP_CHARGE;
  3326.         else
  3327.             status = HIGH_TEMP_CHARGE;
  3328.     } else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
  3329.         if (batt_temp < chip->slope_limit_temp)
  3330.             status = LOW_TEMP_DISCHARGE;
  3331.         else
  3332.             status = HIGH_TEMP_DISCHARGE;
  3333.     } else {
  3334.         goto out;
  3335.     }
  3336.  
  3337.     if (status == chip->slope_limit_sts)
  3338.         goto out;
  3339.  
  3340.     val = chip->slope_limit_coeffs[status];
  3341.     val *= MICRO_UNIT;
  3342.     half_float_to_buffer(val, buf);
  3343.     rc = fg_mem_write(chip, buf,
  3344.             SLOPE_LIMITER_COEFF_REG, 2,
  3345.             SLOPE_LIMITER_COEFF_OFFSET, 0);
  3346.     if (rc) {
  3347.         pr_err("Couldn't write to slope_limiter_coeff_reg, rc=%d\n",
  3348.             rc);
  3349.         goto out;
  3350.     }
  3351.  
  3352.     chip->slope_limit_sts = status;
  3353.     if (fg_debug_mask & FG_STATUS)
  3354.         pr_info("Slope limit sts: %d val: %lld buf[%x %x] written\n",
  3355.             status, val, buf[0], buf[1]);
  3356. out:
  3357.     fg_relax(&chip->slope_limit_wakeup_source);
  3358. }
  3359.  
  3360. static int lookup_ocv_for_soc(struct fg_chip *chip, int soc)
  3361. {
  3362.     int64_t *coeffs;
  3363.  
  3364.     if (soc > chip->ocv_junction_p1p2 * 10)
  3365.         coeffs = chip->ocv_coeffs;
  3366.     else if (soc > chip->ocv_junction_p2p3 * 10)
  3367.         coeffs = chip->ocv_coeffs + 4;
  3368.     else
  3369.         coeffs = chip->ocv_coeffs + 8;
  3370.     /* the range of ocv will fit in a 32 bit int */
  3371.     return (int)(coeffs[0]
  3372.         + div_s64(coeffs[1] * soc, 1000LL)
  3373.         + div_s64(coeffs[2] * soc * soc, 1000000LL)
  3374.         + div_s64(coeffs[3] * soc * soc * soc, 1000000000LL));
  3375. }
  3376.  
  3377. static int lookup_soc_for_ocv(struct fg_chip *chip, int ocv)
  3378. {
  3379.     int64_t val;
  3380.     int soc = -EINVAL;
  3381.     /*
  3382.      * binary search variables representing the valid start and end
  3383.      * percentages to search
  3384.      */
  3385.     int start = 0, end = 1000, mid;
  3386.  
  3387.     if (fg_debug_mask & FG_AGING)
  3388.         pr_info("target_ocv = %d\n", ocv);
  3389.     /* do a binary search for the closest soc to match the ocv */
  3390.     while (end - start > 1) {
  3391.         mid = (start + end) / 2;
  3392.         val = lookup_ocv_for_soc(chip, mid);
  3393.         if (fg_debug_mask & FG_AGING)
  3394.             pr_info("start = %d, mid = %d, end = %d, ocv = %lld\n",
  3395.                     start, mid, end, val);
  3396.         if (ocv < val) {
  3397.             end = mid;
  3398.         } else if (ocv > val) {
  3399.             start = mid;
  3400.         } else {
  3401.             soc = mid;
  3402.             break;
  3403.         }
  3404.     }
  3405.     /*
  3406.      * if the exact soc was not found and there are two or less values
  3407.      * remaining, just compare them and see which one is closest to the ocv
  3408.      */
  3409.     if (soc == -EINVAL) {
  3410.         if (abs(ocv - lookup_ocv_for_soc(chip, start))
  3411.                 > abs(ocv - lookup_ocv_for_soc(chip, end)))
  3412.             soc = end;
  3413.         else
  3414.             soc = start;
  3415.     }
  3416.     if (fg_debug_mask & FG_AGING)
  3417.         pr_info("closest = %d, target_ocv = %d, ocv_found = %d\n",
  3418.                 soc, ocv, lookup_ocv_for_soc(chip, soc));
  3419.     return soc;
  3420. }
  3421.  
  3422. #define ESR_ACTUAL_REG      0x554
  3423. #define BATTERY_ESR_REG     0x4F4
  3424. #define TEMP_RS_TO_RSLOW_REG    0x514
  3425. #define ESR_OFFSET      2
/*
 * Estimate the battery's usable capacity from ESR aging.
 *
 * Compares the measured ("actual") ESR against the profile ESR to
 * compute how much extra SOC becomes unusable at the cutoff voltage,
 * then scales nom_cap_uah into *actual_capacity accordingly.  Only
 * runs in FG_AGING_ESR mode and only when temperature and SOC are in
 * a window where the estimate is meaningful; otherwise returns 0
 * without touching *actual_capacity.  Returns a non-zero error code
 * only when the SRAM reads fail.
 */
static int estimate_battery_age(struct fg_chip *chip, int *actual_capacity)
{
    int64_t ocv_cutoff_new, ocv_cutoff_aged, temp_rs_to_rslow;
    int64_t esr_actual, battery_esr, val;
    int soc_cutoff_aged, soc_cutoff_new, rc;
    int battery_soc, unusable_soc, batt_temp;
    u8 buffer[3];

    if (chip->batt_aging_mode != FG_AGING_ESR)
        return 0;

    /* nom_cap_uah == 0 implies the OCV profile was never loaded */
    if (chip->nom_cap_uah == 0) {
        if (fg_debug_mask & FG_AGING)
            pr_info("ocv coefficients not loaded, aborting\n");
        return 0;
    }
    fg_mem_lock(chip);

    /* Only trust the estimate between 15.0C and 40.0C (decidegrees) */
    batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
    if (batt_temp < 150 || batt_temp > 400) {
        if (fg_debug_mask & FG_AGING)
            pr_info("Battery temp (%d) out of range, aborting\n",
                    (int)batt_temp);
        rc = 0;
        goto done;
    }

    /* And only when SOC is in the 25%-75% mid band */
    battery_soc = get_battery_soc_raw(chip) * 100 / FULL_PERCENT_3B;
    if (battery_soc < 25 || battery_soc > 75) {
        if (fg_debug_mask & FG_AGING)
            pr_info("Battery SoC (%d) out of range, aborting\n",
                    (int)battery_soc);
        rc = 0;
        goto done;
    }

    rc = fg_mem_read(chip, buffer, ESR_ACTUAL_REG, 2, 2, 0);
    esr_actual = half_float(buffer);
    rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, ESR_OFFSET, 0);
    battery_esr = half_float(buffer);

    if (rc) {
        goto error_done;
    } else if (esr_actual < battery_esr) {
        /* Aged ESR below profile ESR means no aging to account for */
        if (fg_debug_mask & FG_AGING)
            pr_info("Batt ESR lower than ESR actual, aborting\n");
        rc = 0;
        goto done;
    }
    rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2, 0, 0);
    temp_rs_to_rslow = half_float(buffer);

    if (rc)
        goto error_done;

    fg_mem_release(chip);

    if (fg_debug_mask & FG_AGING) {
        pr_info("batt_soc = %d, cutoff_voltage = %lld, eval current = %d\n",
                battery_soc, chip->cutoff_voltage,
                chip->evaluation_current);
        pr_info("temp_rs_to_rslow = %lld, batt_esr = %lld, esr_actual = %lld\n",
                temp_rs_to_rslow, battery_esr, esr_actual);
    }

    /* calculate soc_cutoff_new */
    val = (1000000LL + temp_rs_to_rslow) * battery_esr;
    val = div64_s64(val, 1000000);
    ocv_cutoff_new = div64_s64(chip->evaluation_current * val, 1000)
        + chip->cutoff_voltage;

    /* calculate soc_cutoff_aged */
    val = (1000000LL + temp_rs_to_rslow) * esr_actual;
    val = div64_s64(val, 1000000);
    ocv_cutoff_aged = div64_s64(chip->evaluation_current * val, 1000)
        + chip->cutoff_voltage;

    if (fg_debug_mask & FG_AGING)
        pr_info("ocv_cutoff_new = %lld, ocv_cutoff_aged = %lld\n",
                ocv_cutoff_new, ocv_cutoff_aged);

    /* Map both cutoff OCVs back to SOC via the profile fit */
    soc_cutoff_new = lookup_soc_for_ocv(chip, ocv_cutoff_new);
    soc_cutoff_aged = lookup_soc_for_ocv(chip, ocv_cutoff_aged);

    if (fg_debug_mask & FG_AGING)
        pr_info("aged soc = %d, new soc = %d\n",
                soc_cutoff_aged, soc_cutoff_new);
    unusable_soc = soc_cutoff_aged - soc_cutoff_new;

    /* unusable_soc is in 0.1% units, hence the /1000 scaling */
    *actual_capacity = div64_s64(((int64_t)chip->nom_cap_uah)
                * (1000 - unusable_soc), 1000);
    if (fg_debug_mask & FG_AGING)
        pr_info("nom cap = %d, actual cap = %d\n",
                chip->nom_cap_uah, *actual_capacity);

    return rc;

error_done:
    pr_err("some register reads failed: %d\n", rc);
done:
    fg_mem_release(chip);
    return rc;
}
  3529.  
  3530. static void battery_age_work(struct work_struct *work)
  3531. {
  3532.     struct fg_chip *chip = container_of(work,
  3533.                 struct fg_chip,
  3534.                 battery_age_work);
  3535.  
  3536.     estimate_battery_age(chip, &chip->actual_cap_uah);
  3537. }
  3538.  
/*
 * Elapsed-time thresholds in milliseconds for 1..32 fuel-gauge
 * accumulation cycles (each step is 1470 ms).  Indexed in lockstep
 * with correction_factors[] below by iavg_3b_to_uah().
 */
static int correction_times[] = {
    1470,
    2940,
    4410,
    5880,
    7350,
    8820,
    10290,
    11760,
    13230,
    14700,
    16170,
    17640,
    19110,
    20580,
    22050,
    23520,
    24990,
    26460,
    27930,
    29400,
    30870,
    32340,
    33810,
    35280,
    36750,
    38220,
    39690,
    41160,
    42630,
    44100,
    45570,
    47040,
};
  3573.  
/*
 * Scaling factors (in millionths) applied to the filtered-current
 * sample, one entry per accumulation-cycle bucket of
 * correction_times[] above.  Entry 0 is unity (1000000).
 */
static int correction_factors[] = {
    1000000,
    1007874,
    1015789,
    1023745,
    1031742,
    1039780,
    1047859,
    1055979,
    1064140,
    1072342,
    1080584,
    1088868,
    1097193,
    1105558,
    1113964,
    1122411,
    1130899,
    1139427,
    1147996,
    1156606,
    1165256,
    1173947,
    1182678,
    1191450,
    1200263,
    1209115,
    1218008,
    1226942,
    1235915,
    1244929,
    1253983,
    1263076,
};
  3608.  
  3609. #define FG_CONVERSION_FACTOR    (64198531LL)
  3610. static int iavg_3b_to_uah(u8 *buffer, int delta_ms)
  3611. {
  3612.     int64_t val, i_filtered;
  3613.     int i, correction_factor;
  3614.  
  3615.     for (i = 0; i < ARRAY_SIZE(correction_times); i++) {
  3616.         if (correction_times[i] > delta_ms)
  3617.             break;
  3618.     }
  3619.     if (i >= ARRAY_SIZE(correction_times)) {
  3620.         if (fg_debug_mask & FG_STATUS)
  3621.             pr_info("fuel gauge took more than 32 cycles\n");
  3622.         i = ARRAY_SIZE(correction_times) - 1;
  3623.     }
  3624.     correction_factor = correction_factors[i];
  3625.     if (fg_debug_mask & FG_STATUS)
  3626.         pr_info("delta_ms = %d, cycles = %d, correction = %d\n",
  3627.                 delta_ms, i, correction_factor);
  3628.     val = buffer[2] << 16 | buffer[1] << 8 | buffer[0];
  3629.     /* convert val from signed 24b to signed 64b */
  3630.     i_filtered = (val << 40) >> 40;
  3631.     val = i_filtered * correction_factor;
  3632.     val = div64_s64(val + FG_CONVERSION_FACTOR / 2, FG_CONVERSION_FACTOR);
  3633.     if (fg_debug_mask & FG_STATUS)
  3634.         pr_info("i_filtered = 0x%llx/%lld, cc_uah = %lld\n",
  3635.                 i_filtered, i_filtered, val);
  3636.  
  3637.     return val;
  3638. }
  3639.  
  3640. static bool fg_is_temperature_ok_for_learning(struct fg_chip *chip)
  3641. {
  3642.     int batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
  3643.  
  3644.     if (batt_temp > chip->learning_data.max_temp
  3645.             || batt_temp < chip->learning_data.min_temp) {
  3646.         if (fg_debug_mask & FG_AGING)
  3647.             pr_info("temp (%d) out of range [%d, %d], aborting\n",
  3648.                     batt_temp,
  3649.                     chip->learning_data.min_temp,
  3650.                     chip->learning_data.max_temp);
  3651.         return false;
  3652.     }
  3653.     return true;
  3654. }
  3655.  
  3656. static void fg_cap_learning_stop(struct fg_chip *chip)
  3657. {
  3658.     chip->learning_data.cc_uah = 0;
  3659.     chip->learning_data.active = false;
  3660. }
  3661.  
  3662. #define I_FILTERED_REG          0x584
  3663. static void fg_cap_learning_work(struct work_struct *work)
  3664. {
  3665.     struct fg_chip *chip = container_of(work,
  3666.                 struct fg_chip,
  3667.                 fg_cap_learning_work);
  3668.     u8 i_filtered[3], data[3];
  3669.     int rc, cc_uah, delta_ms;
  3670.     ktime_t now_kt, delta_kt;
  3671.  
  3672.     mutex_lock(&chip->learning_data.learning_lock);
  3673.     if (!chip->learning_data.active)
  3674.         goto fail;
  3675.     if (!fg_is_temperature_ok_for_learning(chip)) {
  3676.         fg_cap_learning_stop(chip);
  3677.         goto fail;
  3678.     }
  3679.  
  3680.     if (chip->wa_flag & USE_CC_SOC_REG)
  3681.         goto fail;
  3682.  
  3683.     fg_mem_lock(chip);
  3684.  
  3685.     rc = fg_mem_read(chip, i_filtered, I_FILTERED_REG, 3, 0, 0);
  3686.     if (rc) {
  3687.         pr_err("Failed to read i_filtered: %d\n", rc);
  3688.         fg_mem_release(chip);
  3689.         goto fail;
  3690.     }
  3691.     memset(data, 0, 3);
  3692.     rc = fg_mem_write(chip, data, I_FILTERED_REG, 3, 0, 0);
  3693.     if (rc) {
  3694.         pr_err("Failed to clear i_filtered: %d\n", rc);
  3695.         fg_mem_release(chip);
  3696.         goto fail;
  3697.     }
  3698.     fg_mem_release(chip);
  3699.  
  3700.     now_kt = ktime_get_boottime();
  3701.     delta_kt = ktime_sub(now_kt, chip->learning_data.time_stamp);
  3702.     chip->learning_data.time_stamp = now_kt;
  3703.  
  3704.     delta_ms = (int)div64_s64(ktime_to_ns(delta_kt), 1000000);
  3705.  
  3706.     cc_uah = iavg_3b_to_uah(i_filtered, delta_ms);
  3707.     chip->learning_data.cc_uah -= cc_uah;
  3708.     if (fg_debug_mask & FG_AGING)
  3709.         pr_info("total_cc_uah = %lld\n", chip->learning_data.cc_uah);
  3710.  
  3711. fail:
  3712.     if (chip->wa_flag & USE_CC_SOC_REG)
  3713.         fg_relax(&chip->capacity_learning_wakeup_source);
  3714.     mutex_unlock(&chip->learning_data.learning_lock);
  3715.     if (chip->wa_flag & USE_CC_SOC_REG)
  3716.         fg_relax(&chip->capacity_learning_wakeup_source);
  3717.     return;
  3718.  
  3719. }
  3720.  
  3721. #define CC_SOC_BASE_REG     0x5BC
  3722. #define CC_SOC_OFFSET       3
  3723. #define CC_SOC_MAGNITUDE_MASK   0x1FFFFFFF
  3724. #define CC_SOC_NEGATIVE_BIT BIT(29)
  3725. static int fg_get_cc_soc(struct fg_chip *chip, int *cc_soc)
  3726. {
  3727.     int rc;
  3728.     u8 reg[4];
  3729.     unsigned int temp, magnitude;
  3730.  
  3731.     rc = fg_mem_read(chip, reg, CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
  3732.     if (rc) {
  3733.         pr_err("Failed to read CC_SOC_REG rc=%d\n", rc);
  3734.         return rc;
  3735.     }
  3736.  
  3737.     temp = reg[3] << 24 | reg[2] << 16 | reg[1] << 8 | reg[0];
  3738.     magnitude = temp & CC_SOC_MAGNITUDE_MASK;
  3739.     if (temp & CC_SOC_NEGATIVE_BIT)
  3740.         *cc_soc = -1 * (~magnitude + 1);
  3741.     else
  3742.         *cc_soc = magnitude;
  3743.     return 0;
  3744. }
  3745.  
/*
 * Return the battery's present charge in uAh.
 *
 * Without the CC_SOC_REG workaround this is the learning accumulator;
 * with it, the charge is scaled from the coulomb-counter SOC fraction
 * of the learned capacity.  Returns -EINVAL when no capacity has been
 * learned yet, or a negative errno if the SOC read fails.
 */
static int fg_get_current_cc(struct fg_chip *chip)
{
    int cc_soc, rc;
    int64_t current_capacity;
    if (!(chip->wa_flag & USE_CC_SOC_REG))
        return chip->learning_data.cc_uah;
    if (!chip->learning_data.learned_cc_uah)
        return -EINVAL;
    rc = fg_get_cc_soc(chip, &cc_soc);
    if (rc < 0) {
        pr_err("Failed to get cc_soc, rc=%d\n", rc);
        return rc;
    }
    current_capacity = cc_soc * chip->learning_data.learned_cc_uah;
    /* NOTE(review): unsigned divide — presumably cc_soc is expected to
     * be non-negative here; a negative value would wrap. Verify. */
    current_capacity = div64_u64(current_capacity, FULL_PERCENT_28BIT);
    return current_capacity;
}
  3763.  
  3764. #define BATT_MISSING_STS BIT(6)
  3765. static bool is_battery_missing(struct fg_chip *chip)
  3766. {
  3767.     int rc;
  3768.     u8 fg_batt_sts;
  3769.  
  3770.     rc = fg_read(chip, &fg_batt_sts,
  3771.                  INT_RT_STS(chip->batt_base), 1);
  3772.     if (rc) {
  3773.         pr_err("spmi read failed: addr=%03X, rc=%d\n",
  3774.                 INT_RT_STS(chip->batt_base), rc);
  3775.         return false;
  3776.     }
  3777.  
  3778.     return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
  3779. }
  3780.  
/*
 * Fold the CC_SOC delta accumulated since learning started into the
 * learning accumulator (chip->learning_data.cc_uah).  Called from the
 * status-change path when charging reaches FULL (CC_SOC_REG path).
 * Returns 0 on success or a negative errno; most failures also stop
 * the learning cycle.
 */
static int fg_cap_learning_process_full_data(struct fg_chip *chip)
{
    int cc_pc_val, rc = -EINVAL;
    unsigned int cc_soc_delta_pc;
    int64_t delta_cc_uah;
    uint64_t temp;
    bool batt_missing = is_battery_missing(chip);

    if (batt_missing) {
        pr_err("Battery is missing!\n");
        goto fail;
    }

    if (!chip->learning_data.active)
        goto fail;

    if (!fg_is_temperature_ok_for_learning(chip)) {
        fg_cap_learning_stop(chip);
        goto fail;
    }

    rc = fg_get_cc_soc(chip, &cc_pc_val);
    if (rc) {
        pr_err("failed to get CC_SOC, stopping capacity learning\n");
        fg_cap_learning_stop(chip);
        goto fail;
    }

    /* percentage of full scale the CC_SOC moved since learning began */
    temp = abs(cc_pc_val - chip->learning_data.init_cc_pc_val);
    cc_soc_delta_pc = DIV_ROUND_CLOSEST_ULL(temp * 100, FULL_PERCENT_28BIT);

    /* convert that percentage of the nominal capacity into uAh */
    delta_cc_uah = div64_s64(
            chip->nom_cap_uah * cc_soc_delta_pc,
            100);
    chip->learning_data.cc_uah = delta_cc_uah + chip->learning_data.cc_uah;

    pr_info("current cc_soc=%d cc_soc_pc=%d init_cc_pc_val=%d delta_cc_uah=%lld learned_cc_uah=%lld total_cc_uah = %lld\n",
        cc_pc_val, cc_soc_delta_pc,
        chip->learning_data.init_cc_pc_val,
        delta_cc_uah,
        chip->learning_data.learned_cc_uah,
        chip->learning_data.cc_uah);

    return 0;

fail:
    return rc;
}
  3829.  
  3830. #define FG_CAP_LEARNING_INTERVAL_NS 30000000000
  3831. static enum alarmtimer_restart fg_cap_learning_alarm_cb(struct alarm *alarm,
  3832.                             ktime_t now)
  3833. {
  3834.     struct fg_chip *chip = container_of(alarm, struct fg_chip,
  3835.                     fg_cap_learning_alarm);
  3836.  
  3837.     if (chip->learning_data.active) {
  3838.         if (fg_debug_mask & FG_AGING)
  3839.             pr_info("alarm fired\n");
  3840.         schedule_work(&chip->fg_cap_learning_work);
  3841.         alarm_forward_now(alarm,
  3842.                 ns_to_ktime(FG_CAP_LEARNING_INTERVAL_NS));
  3843.         return ALARMTIMER_RESTART;
  3844.     }
  3845.     if (fg_debug_mask & FG_AGING)
  3846.         pr_info("alarm misfired\n");
  3847.     return ALARMTIMER_NORESTART;
  3848. }
  3849.  
  3850. #define FG_AGING_STORAGE_REG        0x5E4
  3851. #define ACTUAL_CAPACITY_REG     0x578
  3852. #define MAH_TO_SOC_CONV_REG     0x4A0
  3853. #define CC_SOC_COEFF_OFFSET     0
  3854. #define ACTUAL_CAPACITY_OFFSET      2
  3855. #define MAH_TO_SOC_CONV_CS_OFFSET   0
  3856. static int fg_calc_and_store_cc_soc_coeff(struct fg_chip *chip, int16_t cc_mah)
  3857. {
  3858.     int rc;
  3859.     int64_t cc_to_soc_coeff, mah_to_soc;
  3860.     u8 data[2];
  3861.  
  3862.     rc = fg_mem_write(chip, (u8 *)&cc_mah, ACTUAL_CAPACITY_REG, 2,
  3863.             ACTUAL_CAPACITY_OFFSET, 0);
  3864.     if (rc) {
  3865.         pr_err("Failed to store actual capacity: %d\n", rc);
  3866.         return rc;
  3867.     }
  3868.  
  3869.     rc = fg_mem_read(chip, (u8 *)&data, MAH_TO_SOC_CONV_REG, 2,
  3870.             MAH_TO_SOC_CONV_CS_OFFSET, 0);
  3871.     if (rc) {
  3872.         pr_err("Failed to read mah_to_soc_conv_cs: %d\n", rc);
  3873.     } else {
  3874.         mah_to_soc = data[1] << 8 | data[0];
  3875.         mah_to_soc *= MICRO_UNIT;
  3876.         cc_to_soc_coeff = div64_s64(mah_to_soc, cc_mah);
  3877.         half_float_to_buffer(cc_to_soc_coeff, data);
  3878.         rc = fg_mem_write(chip, (u8 *)data,
  3879.                 ACTUAL_CAPACITY_REG, 2,
  3880.                 CC_SOC_COEFF_OFFSET, 0);
  3881.         if (rc)
  3882.             pr_err("Failed to write cc_soc_coeff_offset: %d\n",
  3883.                 rc);
  3884.         else if (fg_debug_mask & FG_AGING)
  3885.             pr_info("new cc_soc_coeff %lld [%x %x] saved to sram\n",
  3886.                 cc_to_soc_coeff, data[0], data[1]);
  3887.     }
  3888.     return rc;
  3889. }
  3890.  
  3891. static void fg_cap_learning_load_data(struct fg_chip *chip)
  3892. {
  3893.     int16_t cc_mah;
  3894.     int64_t old_cap = chip->learning_data.learned_cc_uah;
  3895.     int rc;
  3896.  
  3897.     rc = fg_mem_read(chip, (u8 *)&cc_mah, FG_AGING_STORAGE_REG, 2, 0, 0);
  3898.     if (rc) {
  3899.         pr_err("Failed to load aged capacity: %d\n", rc);
  3900.     } else {
  3901.         chip->learning_data.learned_cc_uah = cc_mah * 1000;
  3902.         if (fg_debug_mask & FG_AGING)
  3903.             pr_info("learned capacity %lld-> %lld/%x uah\n",
  3904.                     old_cap,
  3905.                     chip->learning_data.learned_cc_uah,
  3906.                     cc_mah);
  3907.     }
  3908. }
  3909.  
  3910. static void fg_cap_learning_save_data(struct fg_chip *chip)
  3911. {
  3912.     int16_t cc_mah;
  3913.     int rc;
  3914.     bool batt_missing = is_battery_missing(chip);
  3915.  
  3916.     if (batt_missing) {
  3917.         pr_err("Battery is missing!\n");
  3918.         return;
  3919.     }
  3920.  
  3921.     cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
  3922.  
  3923.     rc = fg_mem_write(chip, (u8 *)&cc_mah, FG_AGING_STORAGE_REG, 2, 0, 0);
  3924.     if (rc)
  3925.         pr_err("Failed to store aged capacity: %d\n", rc);
  3926.     else if (fg_debug_mask & FG_AGING)
  3927.         pr_info("learned capacity %lld uah (%d/0x%x uah) saved to sram\n",
  3928.                 chip->learning_data.learned_cc_uah,
  3929.                 cc_mah, cc_mah);
  3930.  
  3931.     if (chip->learning_data.feedback_on) {
  3932.         rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
  3933.         if (rc)
  3934.             pr_err("Error in storing cc_soc_coeff, rc:%d\n", rc);
  3935.     }
  3936. }
  3937.  
/*
 * Clamp the capacity accumulated over a full charge cycle to the
 * configured increase/decrease limits, commit the result to
 * chip->learning_data.learned_cc_uah and persist it to SRAM.
 */
static void fg_cap_learning_post_process(struct fg_chip *chip)
{
    int64_t max_inc_val, min_dec_val, old_cap;
    bool batt_missing = is_battery_missing(chip);

    if (batt_missing) {
        pr_err("Battery is missing!\n");
        return;
    }

    /* upper bound: nominal capacity scaled by (1000 + max_increment) */
    max_inc_val = (int64_t)chip->nom_cap_uah
            * (1000 + chip->learning_data.max_increment);
    max_inc_val = div_s64(max_inc_val, 1000);

    /* lower bound: previous learned capacity minus max_decrement */
    min_dec_val = (int64_t)chip->learning_data.learned_cc_uah
            * (1000 - chip->learning_data.max_decrement);
    min_dec_val = div_s64(min_dec_val, 1000);

    old_cap = chip->learning_data.learned_cc_uah;
    if (chip->learning_data.cc_uah > max_inc_val)
        chip->learning_data.learned_cc_uah = max_inc_val;
    else if (chip->learning_data.cc_uah < min_dec_val)
        chip->learning_data.learned_cc_uah = min_dec_val;
    else
        chip->learning_data.learned_cc_uah =
            chip->learning_data.cc_uah;

    /* optional absolute ceiling relative to the nominal capacity */
    if (chip->learning_data.max_cap_limit) {
        max_inc_val = (int64_t)chip->nom_cap_uah * (1000 +
                chip->learning_data.max_cap_limit);
        max_inc_val = div64_u64(max_inc_val, 1000);
        if (chip->learning_data.cc_uah > max_inc_val) {
            if (fg_debug_mask & FG_AGING)
                pr_info("learning capacity %lld goes above max limit %lld\n",
                    chip->learning_data.cc_uah,
                    max_inc_val);
            chip->learning_data.learned_cc_uah = max_inc_val;
        }
    }

    /* optional absolute floor relative to the nominal capacity */
    if (chip->learning_data.min_cap_limit) {
        min_dec_val = (int64_t)chip->nom_cap_uah * (1000 -
                chip->learning_data.min_cap_limit);
        min_dec_val = div64_u64(min_dec_val, 1000);
        if (chip->learning_data.cc_uah < min_dec_val) {
            if (fg_debug_mask & FG_AGING)
                pr_info("learning capacity %lld goes below min limit %lld\n",
                    chip->learning_data.cc_uah,
                    min_dec_val);
            chip->learning_data.learned_cc_uah = min_dec_val;
        }
    }

    fg_cap_learning_save_data(chip);
    pr_info("final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
        chip->learning_data.cc_uah,
        old_cap, chip->learning_data.learned_cc_uah);
}
  3996.  
  3997. static int get_vbat_est_diff(struct fg_chip *chip)
  3998. {
  3999.     return abs(fg_data[FG_DATA_VOLTAGE].value
  4000.                 - fg_data[FG_DATA_CPRED_VOLTAGE].value);
  4001. }
  4002.  
  4003. #define CBITS_INPUT_FILTER_REG      0x4B4
  4004. #define IBATTF_TAU_MASK         0x38
  4005. #define IBATTF_TAU_99_S         0x30
  4006. static int set_prop_enable_charging(struct fg_chip *chip, bool enable);
  4007. static int fg_do_restart(struct fg_chip *chip, bool write_profile);
/*
 * Compare measured vs. predicted battery voltage; if the difference
 * exceeds the configured threshold (and restart is enabled, a profile
 * is loaded, no restart is in flight, and the battery is present),
 * force a fuel-gauge restart with charging temporarily disabled around
 * it.  Returns the last operation's rc (0 when no restart was needed).
 */
static int fg_vbat_est_check(struct fg_chip *chip)
{
    int rc = 0;
    int vbat_est_diff, vbat_est_thr_uv;
    bool batt_missing = is_battery_missing(chip);

    vbat_est_diff = get_vbat_est_diff(chip);
    vbat_est_thr_uv = chip->learning_data.vbat_est_thr_uv;
    pr_info("vbat(%d),est-vbat(%d),diff(%d),threshold(%d)\n",
            fg_data[FG_DATA_VOLTAGE].value,
            fg_data[FG_DATA_CPRED_VOLTAGE].value,
            vbat_est_diff, vbat_est_thr_uv);

    if ((vbat_est_diff > vbat_est_thr_uv)
        && chip->fg_force_restart_enable
        && chip->first_profile_loaded
        && !chip->fg_restarting
        && !batt_missing) {
        bool enabled = !chip->charging_disabled;
        pr_info("vbat_est_diff is larger than vbat_est_thr_uv,so force fg restart\n");
        /* Disable charging before restart fg */
        if (enabled) {
            rc = set_prop_enable_charging(chip, false);
            if (rc)
                pr_err("Failed to disable charging, rc=%d\n", rc);
        }
        rc = fg_do_restart(chip, true);
        if (rc)
            pr_err("fg restart failed: %d\n", rc);
        /* Enable charging after restart fg */
        if (enabled && chip->charging_disabled) {
            rc = set_prop_enable_charging(chip, true);
            if (rc)
                pr_err("Failed to enable charging, rc=%d\n", rc);
        }
    }
    return rc;
}
  4046.  
  4047. static int fg_cap_learning_check(struct fg_chip *chip)
  4048. {
  4049.     u8 data[4];
  4050.     int rc = 0, battery_soc, cc_pc_val;
  4051.     int vbat_est_diff, vbat_est_thr_uv;
  4052.     unsigned int cc_pc_100 = FULL_PERCENT_28BIT;
  4053.  
  4054.     mutex_lock(&chip->learning_data.learning_lock);
  4055.     if (chip->status == POWER_SUPPLY_STATUS_CHARGING
  4056.                 && !chip->learning_data.active
  4057.                 && chip->batt_aging_mode == FG_AGING_CC) {
  4058.         if (chip->learning_data.learned_cc_uah == 0) {
  4059.             if (fg_debug_mask & FG_AGING)
  4060.                 pr_info("no capacity, aborting\n");
  4061.             goto fail;
  4062.         }
  4063.  
  4064.         if (!fg_is_temperature_ok_for_learning(chip))
  4065.             goto fail;
  4066.  
  4067.         fg_mem_lock(chip);
  4068.         if (!chip->learning_data.feedback_on) {
  4069.             vbat_est_diff = get_vbat_est_diff(chip);
  4070.             vbat_est_thr_uv = chip->learning_data.vbat_est_thr_uv;
  4071.             if (vbat_est_diff >= vbat_est_thr_uv &&
  4072.                     vbat_est_thr_uv > 0) {
  4073.                 if (fg_debug_mask & FG_AGING)
  4074.                     pr_info("vbat_est_diff (%d) < threshold (%d)\n",
  4075.                         vbat_est_diff, vbat_est_thr_uv);
  4076.                 fg_mem_release(chip);
  4077.                 fg_cap_learning_stop(chip);
  4078.                 goto fail;
  4079.             }
  4080.         }
  4081.         battery_soc = get_battery_soc_raw(chip);
  4082.         if (fg_debug_mask & FG_AGING)
  4083.             pr_info("checking battery soc (%d vs %d)\n",
  4084.                 battery_soc * 100 / FULL_PERCENT_3B,
  4085.                 chip->learning_data.max_start_soc);
  4086.         /* check if the battery is low enough to start soc learning */
  4087.         if (battery_soc * 100 / FULL_PERCENT_3B
  4088.                 > chip->learning_data.max_start_soc) {
  4089.             if (fg_debug_mask & FG_AGING)
  4090.                 pr_info("battery soc too high (%d > %d), aborting\n",
  4091.                     battery_soc * 100 / FULL_PERCENT_3B,
  4092.                     chip->learning_data.max_start_soc);
  4093.             fg_mem_release(chip);
  4094.             fg_cap_learning_stop(chip);
  4095.             goto fail;
  4096.         }
  4097.  
  4098.         /* set the coulomb counter to a percentage of the capacity */
  4099.         chip->learning_data.cc_uah = div64_s64(
  4100.             (chip->learning_data.learned_cc_uah * battery_soc),
  4101.                 FULL_PERCENT_3B);
  4102.  
  4103.         /* Use CC_SOC_REG based capacity learning */
  4104.         if (chip->wa_flag & USE_CC_SOC_REG) {
  4105.             fg_mem_release(chip);
  4106.             /* SW_CC_SOC based capacity learning */
  4107.             if (fg_get_cc_soc(chip, &cc_pc_val)) {
  4108.                 pr_err("failed to get CC_SOC, stop capacity learning\n");
  4109.                 fg_cap_learning_stop(chip);
  4110.                 goto fail;
  4111.             }
  4112.  
  4113.             chip->learning_data.init_cc_pc_val = cc_pc_val;
  4114.             chip->learning_data.active = true;
  4115.             pr_info("SW_CC_SOC based learning init_CC_SOC=%d\n",
  4116.                 chip->learning_data.init_cc_pc_val);
  4117.         } else {
  4118.             rc = fg_mem_masked_write(chip, CBITS_INPUT_FILTER_REG,
  4119.                     IBATTF_TAU_MASK, IBATTF_TAU_99_S, 0);
  4120.             if (rc) {
  4121.                 pr_err("Failed to write IF IBAT Tau: %d\n",
  4122.                                 rc);
  4123.                 fg_mem_release(chip);
  4124.                 fg_cap_learning_stop(chip);
  4125.                 goto fail;
  4126.             }
  4127.  
  4128.             /* clear the i_filtered register */
  4129.             memset(data, 0, 4);
  4130.             rc = fg_mem_write(chip, data, I_FILTERED_REG, 3, 0, 0);
  4131.             if (rc) {
  4132.                 pr_err("Failed to clear i_filtered: %d\n", rc);
  4133.                 fg_mem_release(chip);
  4134.                 fg_cap_learning_stop(chip);
  4135.                 goto fail;
  4136.             }
  4137.             fg_mem_release(chip);
  4138.             chip->learning_data.time_stamp = ktime_get_boottime();
  4139.             chip->learning_data.active = true;
  4140.  
  4141.             if (fg_debug_mask & FG_AGING)
  4142.                 pr_info("cap learning started, soc = %d cc_uah = %lld\n",
  4143.                     battery_soc * 100 / FULL_PERCENT_3B,
  4144.                     chip->learning_data.cc_uah);
  4145.             rc = alarm_start_relative(&chip->fg_cap_learning_alarm,
  4146.                 ns_to_ktime(FG_CAP_LEARNING_INTERVAL_NS));
  4147.             if (rc) {
  4148.                 pr_err("Failed to start alarm: %d\n", rc);
  4149.                 fg_cap_learning_stop(chip);
  4150.                 goto fail;
  4151.             }
  4152.         }
  4153.     } else if ((chip->status != POWER_SUPPLY_STATUS_CHARGING)
  4154.                 && chip->learning_data.active) {
  4155.         if (fg_debug_mask & FG_AGING)
  4156.             pr_info("capacity learning stopped\n");
  4157.         if (!(chip->wa_flag & USE_CC_SOC_REG))
  4158.             alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
  4159.  
  4160.         if (chip->status == POWER_SUPPLY_STATUS_FULL) {
  4161.             if (chip->wa_flag & USE_CC_SOC_REG) {
  4162.                 rc = fg_cap_learning_process_full_data(chip);
  4163.                 if (rc) {
  4164.                     fg_cap_learning_stop(chip);
  4165.                     goto fail;
  4166.                 }
  4167.                 /* reset SW_CC_SOC register to 100% */
  4168.                 rc = fg_mem_write(chip, (u8 *)&cc_pc_100,
  4169.                     CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
  4170.                 if (rc)
  4171.                     pr_err("Failed to reset CC_SOC_REG rc=%d\n",
  4172.                                     rc);
  4173.             }
  4174.             fg_cap_learning_post_process(chip);
  4175.         }
  4176.  
  4177.         fg_cap_learning_stop(chip);
  4178.     } else if (chip->status == POWER_SUPPLY_STATUS_FULL) {
  4179.         if (chip->wa_flag & USE_CC_SOC_REG) {
  4180.             /* reset SW_CC_SOC register to 100% upon charge_full */
  4181.             rc = fg_mem_write(chip, (u8 *)&cc_pc_100,
  4182.                 CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
  4183.             if (rc)
  4184.                 pr_err("Failed to reset CC_SOC_REG rc=%d\n",
  4185.                                 rc);
  4186.             else if (fg_debug_mask & FG_STATUS)
  4187.                 pr_info("Reset SW_CC_SOC to full value\n");
  4188.         }
  4189.     }
  4190.  
  4191. fail:
  4192.     mutex_unlock(&chip->learning_data.learning_lock);
  4193.     return rc;
  4194. }
  4195.  
  4196. static bool is_usb_present(struct fg_chip *chip)
  4197. {
  4198.     union power_supply_propval prop = {0,};
  4199.  
  4200.     if (!chip->usb_psy)
  4201.         chip->usb_psy = power_supply_get_by_name("usb");
  4202.  
  4203.     if (chip->usb_psy)
  4204.         power_supply_get_property(chip->usb_psy,
  4205.                 POWER_SUPPLY_PROP_PRESENT, &prop);
  4206.     return prop.intval != 0;
  4207. }
  4208.  
  4209. static bool is_dc_present(struct fg_chip *chip)
  4210. {
  4211.     union power_supply_propval prop = {0,};
  4212.     if (!chip->dc_psy)
  4213.         chip->dc_psy = power_supply_get_by_name("dc");
  4214.  
  4215.     if (chip->dc_psy)
  4216.         power_supply_get_property(chip->dc_psy,
  4217.                 POWER_SUPPLY_PROP_PRESENT, &prop);
  4218.     return prop.intval != 0;
  4219. }
  4220.  
  4221. static bool is_input_present(struct fg_chip *chip)
  4222. {
  4223.     return is_usb_present(chip) || is_dc_present(chip);
  4224. }
  4225.  
  4226. static bool is_otg_present(struct fg_chip *chip)
  4227. {
  4228.     union power_supply_propval prop = {0,};
  4229.  
  4230.     if (!chip->usb_psy)
  4231.         chip->usb_psy = power_supply_get_by_name("usb");
  4232.  
  4233.     if (chip->usb_psy)
  4234.         power_supply_get_property(chip->usb_psy,
  4235.                 POWER_SUPPLY_PROP_USB_OTG, &prop);
  4236.     return prop.intval != 0;
  4237. }
  4238.  
  4239. static bool is_charger_available(struct fg_chip *chip)
  4240. {
  4241.     if (!chip->batt_psy_name)
  4242.         return false;
  4243.  
  4244.     if (!chip->batt_psy)
  4245.         chip->batt_psy = power_supply_get_by_name(chip->batt_psy_name);
  4246.  
  4247.     if (!chip->batt_psy)
  4248.         return false;
  4249.  
  4250.     return true;
  4251. }
  4252.  
  4253. static int set_prop_enable_charging(struct fg_chip *chip, bool enable)
  4254. {
  4255.     int rc = 0;
  4256.     union power_supply_propval ret = {enable, };
  4257.  
  4258.     if (!is_charger_available(chip)) {
  4259.         pr_err("Charger not available yet!\n");
  4260.         return -EINVAL;
  4261.     }
  4262.  
  4263.     rc = power_supply_set_property(chip->batt_psy,
  4264.             POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
  4265.             &ret);
  4266.     if (rc) {
  4267.         pr_err("couldn't configure batt chg %d\n", rc);
  4268.         return rc;
  4269.     }
  4270.  
  4271.     chip->charging_disabled = !enable;
  4272.     if (fg_debug_mask & FG_STATUS)
  4273.         pr_info("%sabling charging\n", enable ? "en" : "dis");
  4274.  
  4275.     return rc;
  4276. }
  4277.  
/* sw_cc_soc full_capacity above this value => battery deemed damaged */
#define MAX_BATTERY_CC_SOC_CAPACITY     150
/*
 * Worker run whenever the charger reports a charging-status or
 * safety-timer change (scheduled from fg_power_set_property).
 * Responsibilities, in order:
 *  - bail out early if the battery is missing;
 *  - kick ESR pulse extraction reconfiguration when enabled;
 *  - latch "charge full" (hold SOC at 100) when FULL is reported at
 *    >= 99% capacity with good health;
 *  - gate the VBATT_LOW / FULL_SOC interrupts on charging vs discharging;
 *  - run capacity learning and vbat estimate checks;
 *  - on a status transition, resync SW CC_SOC, temperature/slope-limiter
 *    work, discharge-gain work, SRAM data and cycle counting;
 *  - when the charger safety timer expires, decide between "battery
 *    damaged" and "reset timer and restart charging" based on coulomb
 *    counter drift (sw bad-battery detection).
 */
static void status_change_work(struct work_struct *work)
{
    struct fg_chip *chip = container_of(work,
                struct fg_chip,
                status_change_work);
    unsigned long current_time = 0;
    int cc_soc, batt_soc, rc, capacity = get_prop_capacity(chip);
    bool batt_missing = is_battery_missing(chip);

    if (batt_missing) {
        if (fg_debug_mask & FG_STATUS)
            pr_info("Battery is missing\n");
        return;
    }

    /* re-evaluate ESR pulse extraction config on every status change */
    if (chip->esr_pulse_tune_en) {
        fg_stay_awake(&chip->esr_extract_wakeup_source);
        schedule_work(&chip->esr_extract_config_work);
    }

    if (chip->status == POWER_SUPPLY_STATUS_FULL) {
        if (capacity >= 99 && chip->hold_soc_while_full
                && chip->health == POWER_SUPPLY_HEALTH_GOOD) {
            if (fg_debug_mask & FG_STATUS)
                pr_info("holding soc at 100\n");
            chip->charge_full = true;
        } else if (fg_debug_mask & FG_STATUS) {
            pr_info("terminated charging at %d/0x%02x\n",
                    capacity, get_monotonic_soc_raw(chip));
        }
    }
    /*
     * While charging (or full), arm the VBATT_LOW and FULL_SOC
     * interrupts; while discharging, disarm them again.
     */
    if (chip->status == POWER_SUPPLY_STATUS_FULL ||
            chip->status == POWER_SUPPLY_STATUS_CHARGING) {
        if (!chip->vbat_low_irq_enabled &&
                !chip->use_vbat_low_empty_soc) {
            enable_irq(chip->batt_irq[VBATT_LOW].irq);
            enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
            chip->vbat_low_irq_enabled = true;
        }

        if (!chip->full_soc_irq_enabled) {
            enable_irq(chip->soc_irq[FULL_SOC].irq);
            enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
            chip->full_soc_irq_enabled = true;
        }

        if (!!(chip->wa_flag & PULSE_REQUEST_WA) && capacity == 100)
            fg_configure_soc(chip);
    } else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
        if (chip->vbat_low_irq_enabled &&
                !chip->use_vbat_low_empty_soc) {
            disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
            disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
            chip->vbat_low_irq_enabled = false;
        }

        if (chip->full_soc_irq_enabled) {
            disable_irq_wake(chip->soc_irq[FULL_SOC].irq);
            disable_irq_nosync(chip->soc_irq[FULL_SOC].irq);
            chip->full_soc_irq_enabled = false;
        }
    }
    fg_cap_learning_check(chip);
    fg_vbat_est_check(chip);
    schedule_work(&chip->update_esr_work);

    /*
     * cc_soc is only read (and only later consumed) when the
     * USE_CC_SOC_REG workaround is active; both consumers below
     * re-check the flag before using it.
     */
    if (chip->wa_flag & USE_CC_SOC_REG) {
        if (fg_get_cc_soc(chip, &cc_soc)) {
            pr_err("failed to get CC_SOC\n");
            return;
        }
    }

    if (chip->prev_status != chip->status && chip->last_sram_update_time) {
        /*
         * Reset SW_CC_SOC to a value based off battery SOC when
         * the device is discharging.
         */
        if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
            batt_soc = get_battery_soc_raw(chip);
            if (!batt_soc)
                return;

            /* rescale 24-bit battery SOC into the 28-bit CC_SOC domain */
            batt_soc = div64_s64((int64_t)batt_soc *
                    FULL_PERCENT_28BIT, FULL_PERCENT_3B);
            rc = fg_mem_write(chip, (u8 *)&batt_soc,
                CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
            if (rc)
                pr_err("Failed to reset CC_SOC_REG rc=%d\n",
                                    rc);
            else if (fg_debug_mask & FG_STATUS)
                pr_info("Reset SW_CC_SOC to %x\n", batt_soc);
        }

        /*
         * Schedule the update_temp_work whenever there is a status
         * change. This is essential for applying the slope limiter
         * coefficients when that feature is enabled.
         */
        if (chip->last_temp_update_time && chip->soc_slope_limiter_en) {
            cancel_delayed_work_sync(&chip->update_temp_work);
            schedule_delayed_work(&chip->update_temp_work,
                msecs_to_jiffies(0));
        }

        if (chip->dischg_gain.enable) {
            fg_stay_awake(&chip->dischg_gain_wakeup_source);
            schedule_work(&chip->dischg_gain_work);
        }

        get_current_time(&current_time);
        /*
         * When charging status changes, update SRAM parameters if it
         * was not updated before 5 seconds from now.
         */
        if (chip->last_sram_update_time + 5 < current_time) {
            cancel_delayed_work(&chip->update_sram_data);
            schedule_delayed_work(&chip->update_sram_data,
                msecs_to_jiffies(0));
        }

        if (chip->cyc_ctr.en)
            schedule_work(&chip->cycle_count_work);

        /* snapshot the baseline for sw bad-battery detection */
        if ((chip->wa_flag & USE_CC_SOC_REG) &&
                chip->bad_batt_detection_en &&
                chip->status == POWER_SUPPLY_STATUS_CHARGING) {
            chip->sw_cc_soc_data.init_sys_soc = capacity;
            chip->sw_cc_soc_data.init_cc_soc = cc_soc;
            if (fg_debug_mask & FG_STATUS)
                pr_info(" Init_sys_soc %d init_cc_soc %d\n",
                    chip->sw_cc_soc_data.init_sys_soc,
                    chip->sw_cc_soc_data.init_cc_soc);
        }
    }

    /*
     * Safety timer expired: estimate how much capacity was actually
     * charged (from CC_SOC delta) on top of the starting system SOC.
     */
    if ((chip->wa_flag & USE_CC_SOC_REG) && chip->bad_batt_detection_en
            && chip->safety_timer_expired) {
        uint64_t delta_cc_soc = abs(cc_soc -
                    chip->sw_cc_soc_data.init_cc_soc);
        chip->sw_cc_soc_data.delta_soc = DIV_ROUND_CLOSEST_ULL(
                delta_cc_soc * 100, FULL_PERCENT_28BIT);
        chip->sw_cc_soc_data.full_capacity =
            chip->sw_cc_soc_data.delta_soc +
            chip->sw_cc_soc_data.init_sys_soc;
        pr_info("Init_sys_soc %d init_cc_soc %d cc_soc %d delta_soc %d full_capacity %d\n",
                chip->sw_cc_soc_data.init_sys_soc,
                chip->sw_cc_soc_data.init_cc_soc, cc_soc,
                chip->sw_cc_soc_data.delta_soc,
                chip->sw_cc_soc_data.full_capacity);
        /*
         * If sw_cc_soc capacity greater than 150, then it's a bad
         * battery. else, reset timer and restart charging.
         */
        if (chip->sw_cc_soc_data.full_capacity >
                MAX_BATTERY_CC_SOC_CAPACITY) {
            pr_info("Battery possibly damaged, do not restart charging\n");
        } else {
            pr_info("Reset safety-timer and restart charging\n");
            /* toggle charging off->on to reset the charger safety timer */
            rc = set_prop_enable_charging(chip, false);
            if (rc) {
                pr_err("failed to disable charging %d\n", rc);
                return;
            }

            chip->safety_timer_expired = false;
            msleep(200);

            rc = set_prop_enable_charging(chip, true);
            if (rc) {
                pr_err("failed to enable charging %d\n", rc);
                return;
            }
        }
    }
}
  4455.  
  4456. /*
  4457.  * Check for change in the status of input or OTG and schedule
  4458.  * IADC gain compensation work.
  4459.  */
  4460. static void check_gain_compensation(struct fg_chip *chip)
  4461. {
  4462.     bool input_present = is_input_present(chip);
  4463.     bool otg_present = is_otg_present(chip);
  4464.  
  4465.     if ((chip->wa_flag & IADC_GAIN_COMP_WA)
  4466.         && ((chip->input_present ^ input_present)
  4467.             || (chip->otg_present ^ otg_present))) {
  4468.         fg_stay_awake(&chip->gain_comp_wakeup_source);
  4469.         chip->input_present = input_present;
  4470.         chip->otg_present = otg_present;
  4471.         cancel_work_sync(&chip->gain_comp_work);
  4472.         schedule_work(&chip->gain_comp_work);
  4473.     }
  4474. }
  4475.  
  4476. static void fg_hysteresis_config(struct fg_chip *chip)
  4477. {
  4478.     int hard_hot = 0, hard_cold = 0;
  4479.  
  4480.     hard_hot = get_prop_jeita_temp(chip, FG_MEM_HARD_HOT);
  4481.     hard_cold = get_prop_jeita_temp(chip, FG_MEM_HARD_COLD);
  4482.     if (chip->health == POWER_SUPPLY_HEALTH_OVERHEAT && !chip->batt_hot) {
  4483.         /* turn down the hard hot threshold */
  4484.         chip->batt_hot = true;
  4485.         set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
  4486.             hard_hot - chip->hot_hysteresis);
  4487.         if (fg_debug_mask & FG_STATUS)
  4488.             pr_info("hard hot hysteresis: old hot=%d, new hot=%d\n",
  4489.                 hard_hot, hard_hot - chip->hot_hysteresis);
  4490.     } else if (chip->health == POWER_SUPPLY_HEALTH_COLD &&
  4491.         !chip->batt_cold) {
  4492.         /* turn up the hard cold threshold */
  4493.         chip->batt_cold = true;
  4494.         set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
  4495.             hard_cold + chip->cold_hysteresis);
  4496.         if (fg_debug_mask & FG_STATUS)
  4497.             pr_info("hard cold hysteresis: old cold=%d, new cold=%d\n",
  4498.                 hard_cold, hard_cold + chip->hot_hysteresis);
  4499.     } else if (chip->health != POWER_SUPPLY_HEALTH_OVERHEAT &&
  4500.         chip->batt_hot) {
  4501.         /* restore the hard hot threshold */
  4502.         set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
  4503.             hard_hot + chip->hot_hysteresis);
  4504.         chip->batt_hot = !chip->batt_hot;
  4505.         if (fg_debug_mask & FG_STATUS)
  4506.             pr_info("restore hard hot threshold: old hot=%d, new hot=%d\n",
  4507.                 hard_hot,
  4508.                 hard_hot + chip->hot_hysteresis);
  4509.     } else if (chip->health != POWER_SUPPLY_HEALTH_COLD &&
  4510.         chip->batt_cold) {
  4511.         /* restore the hard cold threshold */
  4512.         set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
  4513.             hard_cold - chip->cold_hysteresis);
  4514.         chip->batt_cold = !chip->batt_cold;
  4515.         if (fg_debug_mask & FG_STATUS)
  4516.             pr_info("restore hard cold threshold: old cold=%d, new cold=%d\n",
  4517.                 hard_cold,
  4518.                 hard_cold - chip->cold_hysteresis);
  4519.     }
  4520. }
  4521.  
  4522. #define BATT_INFO_STS(base) (base + 0x09)
  4523. #define JEITA_HARD_HOT_RT_STS   BIT(6)
  4524. #define JEITA_HARD_COLD_RT_STS  BIT(5)
  4525. static int fg_init_batt_temp_state(struct fg_chip *chip)
  4526. {
  4527.     int rc = 0;
  4528.     u8 batt_info_sts;
  4529.     int hard_hot = 0, hard_cold = 0;
  4530.  
  4531.     /*
  4532.      * read the batt_info_sts register to parse battery's
  4533.      * initial status and do hysteresis config accordingly.
  4534.      */
  4535.     rc = fg_read(chip, &batt_info_sts,
  4536.         BATT_INFO_STS(chip->batt_base), 1);
  4537.     if (rc) {
  4538.         pr_err("failed to read batt info sts, rc=%d\n", rc);
  4539.         return rc;
  4540.     }
  4541.  
  4542.     hard_hot = get_prop_jeita_temp(chip, FG_MEM_HARD_HOT);
  4543.     hard_cold = get_prop_jeita_temp(chip, FG_MEM_HARD_COLD);
  4544.     chip->batt_hot =
  4545.         (batt_info_sts & JEITA_HARD_HOT_RT_STS) ? true : false;
  4546.     chip->batt_cold =
  4547.         (batt_info_sts & JEITA_HARD_COLD_RT_STS) ? true : false;
  4548.     if (chip->batt_hot || chip->batt_cold) {
  4549.         if (chip->batt_hot) {
  4550.             chip->health = POWER_SUPPLY_HEALTH_OVERHEAT;
  4551.             set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
  4552.                 hard_hot - chip->hot_hysteresis);
  4553.         } else {
  4554.             chip->health = POWER_SUPPLY_HEALTH_COLD;
  4555.             set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
  4556.                 hard_cold + chip->cold_hysteresis);
  4557.         }
  4558.     }
  4559.  
  4560.     return rc;
  4561. }
  4562.  
  4563. static int fg_restore_cc_soc(struct fg_chip *chip)
  4564. {
  4565.     int rc;
  4566.  
  4567.     if (!chip->use_last_cc_soc || !chip->last_cc_soc)
  4568.         return 0;
  4569.  
  4570.     if (fg_debug_mask & FG_STATUS)
  4571.         pr_info("Restoring cc_soc: %lld\n", chip->last_cc_soc);
  4572.  
  4573.     rc = fg_mem_write(chip, (u8 *)&chip->last_cc_soc,
  4574.             fg_data[FG_DATA_CC_CHARGE].address, 4,
  4575.             fg_data[FG_DATA_CC_CHARGE].offset, 0);
  4576.     if (rc)
  4577.         pr_err("failed to update CC_SOC rc=%d\n", rc);
  4578.     else
  4579.         chip->use_last_cc_soc = false;
  4580.  
  4581.     return rc;
  4582. }
  4583.  
  4584. #define SRAM_MONOTONIC_SOC_REG      0x574
  4585. #define SRAM_MONOTONIC_SOC_OFFSET   2
  4586. static int fg_restore_soc(struct fg_chip *chip)
  4587. {
  4588.     int rc;
  4589.     u16 msoc;
  4590.  
  4591.     if (chip->use_last_soc && chip->last_soc)
  4592.         msoc = DIV_ROUND_CLOSEST(chip->last_soc * 0xFFFF,
  4593.                 FULL_SOC_RAW);
  4594.     else
  4595.         return 0;
  4596.  
  4597.     if (fg_debug_mask & FG_STATUS)
  4598.         pr_info("Restored soc: %d\n", msoc);
  4599.  
  4600.     rc = fg_mem_write(chip, (u8 *)&msoc, SRAM_MONOTONIC_SOC_REG, 2,
  4601.             SRAM_MONOTONIC_SOC_OFFSET, 0);
  4602.     if (rc)
  4603.         pr_err("failed to write M_SOC_REG rc=%d\n", rc);
  4604.  
  4605.     return rc;
  4606. }
  4607.  
  4608. #define NOM_CAP_REG         0x4F4
  4609. #define CAPACITY_DELTA_DECIPCT      500
  4610. static int load_battery_aging_data(struct fg_chip *chip)
  4611. {
  4612.     int rc = 0;
  4613.     u8 buffer[2];
  4614.     int16_t cc_mah;
  4615.     int64_t delta_cc_uah, pct_nom_cap_uah;
  4616.  
  4617.     rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
  4618.     if (rc) {
  4619.         pr_err("Failed to read nominal capacitance: %d\n", rc);
  4620.         goto out;
  4621.     }
  4622.  
  4623.     chip->nom_cap_uah = bcap_uah_2b(buffer);
  4624.     chip->actual_cap_uah = chip->nom_cap_uah;
  4625.  
  4626.     if (chip->learning_data.learned_cc_uah == 0) {
  4627.         chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
  4628.         fg_cap_learning_save_data(chip);
  4629.     } else if (chip->learning_data.feedback_on) {
  4630.         delta_cc_uah = abs(chip->learning_data.learned_cc_uah -
  4631.                     chip->nom_cap_uah);
  4632.         pct_nom_cap_uah = div64_s64((int64_t)chip->nom_cap_uah *
  4633.                 CAPACITY_DELTA_DECIPCT, 1000);
  4634.         /*
  4635.          * If the learned capacity is out of range, say by 50%
  4636.          * from the nominal capacity, then overwrite the learned
  4637.          * capacity with the nominal capacity.
  4638.          */
  4639.         if (chip->nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) {
  4640.             if (fg_debug_mask & FG_AGING) {
  4641.                 pr_info("learned_cc_uah: %lld is higher than expected\n",
  4642.                     chip->learning_data.learned_cc_uah);
  4643.                 pr_info("Capping it to nominal:%d\n",
  4644.                     chip->nom_cap_uah);
  4645.             }
  4646.             chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
  4647.             fg_cap_learning_save_data(chip);
  4648.         } else {
  4649.             cc_mah = div64_s64(chip->learning_data.learned_cc_uah,
  4650.                     1000);
  4651.             rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
  4652.             if (rc)
  4653.                 pr_err("Error in restoring cc_soc_coeff, rc:%d\n",
  4654.                     rc);
  4655.         }
  4656.     }
  4657. out:
  4658.     return rc;
  4659. }
  4660.  
  4661. static void fg_restore_battery_info(struct fg_chip *chip)
  4662. {
  4663.     int rc;
  4664.     char buf[4] = {0, 0, 0, 0};
  4665.  
  4666.     chip->last_soc = DIV_ROUND_CLOSEST(chip->batt_info[BATT_INFO_SOC] *
  4667.                 FULL_SOC_RAW, FULL_CAPACITY);
  4668.     chip->last_cc_soc = div64_s64((int64_t)chip->last_soc *
  4669.                 FULL_PERCENT_28BIT, FULL_SOC_RAW);
  4670.     chip->use_last_soc = true;
  4671.     chip->use_last_cc_soc = true;
  4672.     rc = fg_restore_soc(chip);
  4673.     if (rc) {
  4674.         pr_err("Error in restoring soc, rc=%d\n", rc);
  4675.         goto out;
  4676.     }
  4677.  
  4678.     rc = fg_restore_cc_soc(chip);
  4679.     if (rc) {
  4680.         pr_err("Error in restoring cc_soc, rc=%d\n", rc);
  4681.         goto out;
  4682.     }
  4683.  
  4684.     rc = fg_mem_write(chip, buf,
  4685.             fg_data[FG_DATA_VINT_ERR].address,
  4686.             fg_data[FG_DATA_VINT_ERR].len,
  4687.             fg_data[FG_DATA_VINT_ERR].offset, 0);
  4688.     if (rc) {
  4689.         pr_err("Failed to write to VINT_ERR, rc=%d\n", rc);
  4690.         goto out;
  4691.     }
  4692.  
  4693.     chip->learning_data.learned_cc_uah = chip->batt_info[BATT_INFO_FCC];
  4694.     rc = load_battery_aging_data(chip);
  4695.     if (rc) {
  4696.         pr_err("Failed to load battery aging data, rc:%d\n", rc);
  4697.         goto out;
  4698.     }
  4699.  
  4700.     if (chip->power_supply_registered)
  4701.         power_supply_changed(&chip->bms_psy);
  4702.  
  4703.     if (fg_debug_mask & FG_STATUS)
  4704.         pr_info("Restored battery info!\n");
  4705.  
  4706. out:
  4707.     return;
  4708. }
  4709.  
/* max allowed |stored - measured| battery temperature, in decidegC units */
#define DELTA_BATT_TEMP     30
/*
 * Sanity-check the userspace-provided batt_info[] snapshot against live
 * readings before restoring it. Returns true only if:
 *  - every entry (except temperature) is non-zero and not INT_MAX;
 *  - stored battery ID matches the measured ID (kohm);
 *  - stored temperature is within DELTA_BATT_TEMP of the measured one;
 *  - stored FCC is non-negative;
 *  - stored voltage (OCV mode) or SOC deviates from the live value by
 *    no more than *chip->batt_range_pct percent.
 */
static bool fg_validate_battery_info(struct fg_chip *chip)
{
    int i, delta_pct, batt_id_kohm, batt_temp, batt_volt_mv, batt_soc;

    /* index 0 is skipped; 0 is a legal value only for the temp entry */
    for (i = 1; i < BATT_INFO_MAX; i++) {
        if (fg_debug_mask & FG_STATUS)
            pr_info("batt_info[%d]: %d\n", i, chip->batt_info[i]);

        if ((chip->batt_info[i] == 0 && i != BATT_INFO_TEMP) ||
            chip->batt_info[i] == INT_MAX) {
            if (fg_debug_mask & FG_STATUS)
                pr_info("batt_info[%d]:%d is invalid\n", i,
                    chip->batt_info[i]);
            return false;
        }
    }

    batt_id_kohm = get_sram_prop_now(chip, FG_DATA_BATT_ID) / 1000;
    if (batt_id_kohm != chip->batt_info[BATT_INFO_RES_ID]) {
        if (fg_debug_mask & FG_STATUS)
            pr_info("batt_id(%dK) does not match the stored batt_id(%dK)\n",
                batt_id_kohm,
                chip->batt_info[BATT_INFO_RES_ID]);
        return false;
    }

    batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
    if (abs(chip->batt_info[BATT_INFO_TEMP] - batt_temp) >
            DELTA_BATT_TEMP) {
        if (fg_debug_mask & FG_STATUS)
            pr_info("batt_temp(%d) is higher/lower than stored batt_temp(%d)\n",
                batt_temp, chip->batt_info[BATT_INFO_TEMP]);
        return false;
    }

    if (chip->batt_info[BATT_INFO_FCC] < 0) {
        if (fg_debug_mask & FG_STATUS)
            pr_info("batt_fcc cannot be %d\n",
                chip->batt_info[BATT_INFO_FCC]);
        return false;
    }

    batt_volt_mv = get_sram_prop_now(chip, FG_DATA_VOLTAGE) / 1000;
    batt_soc = get_monotonic_soc_raw(chip);
    /*
     * Rescale raw monotonic SOC (1..FULL_SOC_RAW-1) into 1..99%;
     * the 0 and FULL_SOC_RAW endpoints map directly to 0/100.
     */
    if (batt_soc != 0 && batt_soc != FULL_SOC_RAW)
        batt_soc = DIV_ROUND_CLOSEST((batt_soc - 1) *
                (FULL_CAPACITY - 2), FULL_SOC_RAW - 2) + 1;

    /* compare by OCV when configured (and max voltage is sane), else by SOC */
    if (*chip->batt_range_ocv && chip->batt_max_voltage_uv > 1000)
        delta_pct =  DIV_ROUND_CLOSEST(abs(batt_volt_mv -
                chip->batt_info[BATT_INFO_VOLTAGE]) * 100,
                chip->batt_max_voltage_uv / 1000);
    else
        delta_pct = abs(batt_soc - chip->batt_info[BATT_INFO_SOC]);

    if (fg_debug_mask & FG_STATUS)
        pr_info("Validating by %s batt_voltage:%d capacity:%d delta_pct:%d\n",
            *chip->batt_range_ocv ? "OCV" : "SOC", batt_volt_mv,
            batt_soc, delta_pct);

    if (*chip->batt_range_pct && delta_pct > *chip->batt_range_pct) {
        if (fg_debug_mask & FG_STATUS)
            pr_info("delta_pct(%d) is higher than batt_range_pct(%d)\n",
                delta_pct, *chip->batt_range_pct);
        return false;
    }

    return true;
}
  4780.  
  4781. static int fg_set_battery_info(struct fg_chip *chip, int val)
  4782. {
  4783.     if (chip->batt_info_id < 0 ||
  4784.             chip->batt_info_id >= BATT_INFO_MAX) {
  4785.         pr_err("Invalid batt_info_id %d\n", chip->batt_info_id);
  4786.         chip->batt_info_id = 0;
  4787.         return -EINVAL;
  4788.     }
  4789.  
  4790.     if (chip->batt_info_id == BATT_INFO_NOTIFY && val == INT_MAX - 1) {
  4791.         if (fg_debug_mask & FG_STATUS)
  4792.             pr_info("Notified from userspace\n");
  4793.         if (chip->batt_info_restore && !chip->ima_error_handling) {
  4794.             if (!fg_validate_battery_info(chip)) {
  4795.                 if (fg_debug_mask & FG_STATUS)
  4796.                     pr_info("Validating battery info failed\n");
  4797.             } else {
  4798.                 fg_restore_battery_info(chip);
  4799.             }
  4800.         }
  4801.     }
  4802.  
  4803.     chip->batt_info[chip->batt_info_id] = val;
  4804.     return 0;
  4805. }
  4806.  
/* Properties exposed by the bms power_supply; served by
 * fg_power_get_property() / fg_power_set_property().
 */
static enum power_supply_property fg_power_props[] = {
    POWER_SUPPLY_PROP_CAPACITY,
    POWER_SUPPLY_PROP_CAPACITY_RAW,
    POWER_SUPPLY_PROP_CURRENT_NOW,
    POWER_SUPPLY_PROP_VOLTAGE_NOW,
    POWER_SUPPLY_PROP_VOLTAGE_OCV,
    POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
    POWER_SUPPLY_PROP_CHARGE_COUNTER,
    POWER_SUPPLY_PROP_CHARGE_NOW,
    POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
    POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
    POWER_SUPPLY_PROP_CHARGE_FULL,
    POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
    POWER_SUPPLY_PROP_TEMP,
    POWER_SUPPLY_PROP_COOL_TEMP,
    POWER_SUPPLY_PROP_WARM_TEMP,
    POWER_SUPPLY_PROP_RESISTANCE,
    POWER_SUPPLY_PROP_RESISTANCE_ID,
    POWER_SUPPLY_PROP_BATTERY_TYPE,
    POWER_SUPPLY_PROP_UPDATE_NOW,
    POWER_SUPPLY_PROP_ESR_COUNT,
    POWER_SUPPLY_PROP_VOLTAGE_MIN,
    POWER_SUPPLY_PROP_CYCLE_COUNT,
    POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
    POWER_SUPPLY_PROP_HI_POWER,
    POWER_SUPPLY_PROP_SOC_REPORTING_READY,
    POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE,
    POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION,
    POWER_SUPPLY_PROP_BATTERY_INFO,
    POWER_SUPPLY_PROP_BATTERY_INFO_ID,
};
  4838.  
/*
 * power_supply .get_property callback for the bms supply. Most values
 * come from the SRAM shadow (get_sram_prop_now) or real-time reads;
 * capacity and cycle counts are derived by their respective helpers.
 * Returns -EINVAL for unsupported properties or an invalid
 * batt_info_id.
 */
static int fg_power_get_property(struct power_supply *psy,
                       enum power_supply_property psp,
                       union power_supply_propval *val)
{
    struct fg_chip *chip = container_of(psy, struct fg_chip, bms_psy);
    bool vbatt_low_sts;

    switch (psp) {
    case POWER_SUPPLY_PROP_BATTERY_TYPE:
        /* report placeholder strings while missing or restarting */
        if (chip->battery_missing)
            val->strval = missing_batt_type;
        else if (chip->fg_restarting)
            val->strval = loading_batt_type;
        else
            val->strval = chip->batt_type;
        break;
    case POWER_SUPPLY_PROP_CAPACITY:
        val->intval = get_prop_capacity(chip);
        break;
    case POWER_SUPPLY_PROP_CAPACITY_RAW:
        val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
        break;
    case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
        val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
        break;
    case POWER_SUPPLY_PROP_CURRENT_NOW:
        val->intval = get_real_time_prop_value(chip, FG_DATA_CURRENT);
        break;
    case POWER_SUPPLY_PROP_VOLTAGE_NOW:
        val->intval = get_real_time_prop_value(chip, FG_DATA_VOLTAGE);
        break;
    case POWER_SUPPLY_PROP_VOLTAGE_OCV:
        val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
        break;
    case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
        val->intval = chip->batt_max_voltage_uv;
        break;
    case POWER_SUPPLY_PROP_TEMP:
        val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
        /* dummy battery: report a fixed 25.0 degC */
        if(chip->dummy_battery_fake_temp)
            val->intval = 250;
        break;
    case POWER_SUPPLY_PROP_COOL_TEMP:
        val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
        break;
    case POWER_SUPPLY_PROP_WARM_TEMP:
        val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
        break;
    case POWER_SUPPLY_PROP_RESISTANCE:
        val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
        /* flag out-of-range ESR readings but still report them */
        if (!((val->intval < ESR_MAX) && (val->intval > ESR_MIN)))
            pr_err("ESR Bad: %d mOhm\n", val->intval);
        break;
    case POWER_SUPPLY_PROP_ESR_COUNT:
        val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
        break;
    case POWER_SUPPLY_PROP_CYCLE_COUNT:
        val->intval = fg_get_cycle_count(chip);
        break;
    case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
        val->intval = chip->cyc_ctr.id;
        break;
    case POWER_SUPPLY_PROP_RESISTANCE_ID:
        val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
        break;
    case POWER_SUPPLY_PROP_UPDATE_NOW:
        /* write-only trigger; reads always return 0 */
        val->intval = 0;
        break;
    case POWER_SUPPLY_PROP_VOLTAGE_MIN:
        /* on read failure, conservatively report vbatt-low asserted */
        if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
            val->intval = (int)vbatt_low_sts;
        else
            val->intval = 1;
        break;
    case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
        val->intval = chip->nom_cap_uah;
        break;
    case POWER_SUPPLY_PROP_CHARGE_FULL:
        val->intval = chip->learning_data.learned_cc_uah;
        break;
    case POWER_SUPPLY_PROP_CHARGE_NOW:
        val->intval = chip->learning_data.cc_uah;
        break;
    case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
        val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
        break;
    case POWER_SUPPLY_PROP_CHARGE_COUNTER:
        val->intval = fg_get_current_cc(chip);
        break;
    case POWER_SUPPLY_PROP_HI_POWER:
        val->intval = !!chip->bcl_lpm_disabled;
        break;
    case POWER_SUPPLY_PROP_SOC_REPORTING_READY:
        val->intval = !!chip->soc_reporting_ready;
        break;
    case POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE:
        /* stored inverted relative to the exposed property */
        val->intval = !chip->allow_false_negative_isense;
        break;
    case POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION:
        val->intval = chip->use_soft_jeita_irq;
        break;
    case POWER_SUPPLY_PROP_BATTERY_INFO:
        if (chip->batt_info_id < 0 ||
                chip->batt_info_id >= BATT_INFO_MAX)
            return -EINVAL;
        val->intval = chip->batt_info[chip->batt_info_id];
        break;
    case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
        val->intval = chip->batt_info_id;
        break;
    default:
        return -EINVAL;
    }

    return 0;
}
  4955.  
  4956. static int fg_power_set_property(struct power_supply *psy,
  4957.                   enum power_supply_property psp,
  4958.                   const union power_supply_propval *val)
  4959. {
  4960.     struct fg_chip *chip = container_of(psy, struct fg_chip, bms_psy);
  4961.     int rc = 0, unused;
  4962.  
  4963.     switch (psp) {
  4964.     case POWER_SUPPLY_PROP_COOL_TEMP:
  4965.         rc = set_prop_jeita_temp(chip, FG_MEM_SOFT_COLD, val->intval);
  4966.         break;
  4967.     case POWER_SUPPLY_PROP_WARM_TEMP:
  4968.         rc = set_prop_jeita_temp(chip, FG_MEM_SOFT_HOT, val->intval);
  4969.         break;
  4970.     case POWER_SUPPLY_PROP_UPDATE_NOW:
  4971.         if (val->intval)
  4972.             update_sram_data(chip, &unused);
  4973.         break;
  4974.     case POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE:
  4975.         rc = set_prop_ignore_false_negative_isense(chip, !!val->intval);
  4976.         if (rc)
  4977.             pr_err("set_prop_ignore_false_negative_isense failed, rc=%d\n",
  4978.                             rc);
  4979.         else
  4980.             chip->allow_false_negative_isense = !val->intval;
  4981.         break;
  4982.     case POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION:
  4983.         if (chip->use_soft_jeita_irq == !!val->intval) {
  4984.             pr_debug("JEITA irq %s, ignore!\n",
  4985.                 chip->use_soft_jeita_irq ?
  4986.                 "enabled" : "disabled");
  4987.             break;
  4988.         }
  4989.         chip->use_soft_jeita_irq = !!val->intval;
  4990.         if (chip->use_soft_jeita_irq) {
  4991.             if (chip->batt_irq[JEITA_SOFT_COLD].disabled) {
  4992.                 enable_irq(
  4993.                     chip->batt_irq[JEITA_SOFT_COLD].irq);
  4994.                 chip->batt_irq[JEITA_SOFT_COLD].disabled =
  4995.                                 false;
  4996.             }
  4997.             if (!chip->batt_irq[JEITA_SOFT_COLD].wakeup) {
  4998.                 enable_irq_wake(
  4999.                     chip->batt_irq[JEITA_SOFT_COLD].irq);
  5000.                 chip->batt_irq[JEITA_SOFT_COLD].wakeup = true;
  5001.             }
  5002.             if (chip->batt_irq[JEITA_SOFT_HOT].disabled) {
  5003.                 enable_irq(
  5004.                     chip->batt_irq[JEITA_SOFT_HOT].irq);
  5005.                 chip->batt_irq[JEITA_SOFT_HOT].disabled = false;
  5006.             }
  5007.             if (!chip->batt_irq[JEITA_SOFT_HOT].wakeup) {
  5008.                 enable_irq_wake(
  5009.                     chip->batt_irq[JEITA_SOFT_HOT].irq);
  5010.                 chip->batt_irq[JEITA_SOFT_HOT].wakeup = true;
  5011.             }
  5012.         } else {
  5013.             if (chip->batt_irq[JEITA_SOFT_COLD].wakeup) {
  5014.                 disable_irq_wake(
  5015.                     chip->batt_irq[JEITA_SOFT_COLD].irq);
  5016.                 chip->batt_irq[JEITA_SOFT_COLD].wakeup = false;
  5017.             }
  5018.             if (!chip->batt_irq[JEITA_SOFT_COLD].disabled) {
  5019.                 disable_irq_nosync(
  5020.                     chip->batt_irq[JEITA_SOFT_COLD].irq);
  5021.                 chip->batt_irq[JEITA_SOFT_COLD].disabled = true;
  5022.             }
  5023.             if (chip->batt_irq[JEITA_SOFT_HOT].wakeup) {
  5024.                 disable_irq_wake(
  5025.                     chip->batt_irq[JEITA_SOFT_HOT].irq);
  5026.                 chip->batt_irq[JEITA_SOFT_HOT].wakeup = false;
  5027.             }
  5028.             if (!chip->batt_irq[JEITA_SOFT_HOT].disabled) {
  5029.                 disable_irq_nosync(
  5030.                     chip->batt_irq[JEITA_SOFT_HOT].irq);
  5031.                 chip->batt_irq[JEITA_SOFT_HOT].disabled = true;
  5032.             }
  5033.         }
  5034.         break;
  5035.     case POWER_SUPPLY_PROP_STATUS:
  5036.         chip->prev_status = chip->status;
  5037.         chip->status = val->intval;
  5038.         schedule_work(&chip->status_change_work);
  5039.         check_gain_compensation(chip);
  5040.         break;
  5041.     case POWER_SUPPLY_PROP_HEALTH:
  5042.         chip->health = val->intval;
  5043.         if (chip->health == POWER_SUPPLY_HEALTH_GOOD) {
  5044.             fg_stay_awake(&chip->resume_soc_wakeup_source);
  5045.             schedule_work(&chip->set_resume_soc_work);
  5046.         }
  5047.  
  5048.         if (chip->jeita_hysteresis_support)
  5049.             fg_hysteresis_config(chip);
  5050.         break;
  5051.     case POWER_SUPPLY_PROP_CHARGE_DONE:
  5052.         chip->charge_done = val->intval;
  5053.         pr_info("qpnp_fg:charge_done:soc:%d,VOLT:%d,current:%d\n",
  5054.             get_prop_capacity(chip),
  5055.             get_sram_prop_now(chip, FG_DATA_VOLTAGE),
  5056.             get_sram_prop_now(chip, FG_DATA_CURRENT));
  5057.         if (!chip->resume_soc_lowered) {
  5058.             fg_stay_awake(&chip->resume_soc_wakeup_source);
  5059.             schedule_work(&chip->set_resume_soc_work);
  5060.         }
  5061.         break;
  5062.     case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
  5063.         if ((val->intval > 0) && (val->intval <= BUCKET_COUNT)) {
  5064.             chip->cyc_ctr.id = val->intval;
  5065.         } else {
  5066.             pr_err("rejecting invalid cycle_count_id = %d\n",
  5067.                                 val->intval);
  5068.             rc = -EINVAL;
  5069.         }
  5070.         break;
  5071.     case POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED:
  5072.         chip->safety_timer_expired = val->intval;
  5073.         schedule_work(&chip->status_change_work);
  5074.         break;
  5075.     case POWER_SUPPLY_PROP_HI_POWER:
  5076.         if (chip->wa_flag & BCL_HI_POWER_FOR_CHGLED_WA) {
  5077.             chip->bcl_lpm_disabled = !!val->intval;
  5078.             schedule_work(&chip->bcl_hi_power_work);
  5079.         }
  5080.         break;
  5081.     case POWER_SUPPLY_PROP_CHARGE_FULL:
  5082.         if (chip->learning_data.active) {
  5083.             pr_warn("Capacity learning active!\n");
  5084.             return 0;
  5085.         }
  5086.         if (val->intval <= 0 || val->intval > chip->nom_cap_uah) {
  5087.             pr_err("charge_full is out of bounds\n");
  5088.             return -EINVAL;
  5089.         }
  5090.         chip->learning_data.learned_cc_uah = val->intval;
  5091.         fg_cap_learning_save_data(chip);
  5092.         break;
  5093.     case POWER_SUPPLY_PROP_BATTERY_INFO:
  5094.         rc = fg_set_battery_info(chip, val->intval);
  5095.         break;
  5096.     case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
  5097.         chip->batt_info_id = val->intval;
  5098.         break;
  5099.     default:
  5100.         return -EINVAL;
  5101.     };
  5102.  
  5103.     return rc;
  5104. };
  5105.  
  5106. static int fg_property_is_writeable(struct power_supply *psy,
  5107.                         enum power_supply_property psp)
  5108. {
  5109.     switch (psp) {
  5110.     case POWER_SUPPLY_PROP_COOL_TEMP:
  5111.     case POWER_SUPPLY_PROP_WARM_TEMP:
  5112.     case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
  5113.     case POWER_SUPPLY_PROP_BATTERY_INFO:
  5114.     case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
  5115.     case POWER_SUPPLY_PROP_CHARGE_FULL:
  5116.         return 1;
  5117.     default:
  5118.         break;
  5119.     }
  5120.  
  5121.     return 0;
  5122. }
  5123.  
  5124. #define SRAM_DUMP_START     0x400
  5125. #define SRAM_DUMP_LEN       0x200
  5126. static void dump_sram(struct work_struct *work)
  5127. {
  5128.     int i, rc;
  5129.     u8 *buffer, rt_sts;
  5130.     char str[16];
  5131.     struct fg_chip *chip = container_of(work,
  5132.                 struct fg_chip,
  5133.                 dump_sram);
  5134.  
  5135.     buffer = devm_kzalloc(chip->dev, SRAM_DUMP_LEN, GFP_KERNEL);
  5136.     if (buffer == NULL) {
  5137.         pr_err("Can't allocate buffer\n");
  5138.         return;
  5139.     }
  5140.  
  5141.     rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->soc_base), 1);
  5142.     if (rc)
  5143.         pr_err("spmi read failed: addr=%03X, rc=%d\n",
  5144.                 INT_RT_STS(chip->soc_base), rc);
  5145.     else
  5146.         pr_info("soc rt_sts: 0x%x\n", rt_sts);
  5147.  
  5148.     rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->batt_base), 1);
  5149.     if (rc)
  5150.         pr_err("spmi read failed: addr=%03X, rc=%d\n",
  5151.                 INT_RT_STS(chip->batt_base), rc);
  5152.     else
  5153.         pr_info("batt rt_sts: 0x%x\n", rt_sts);
  5154.  
  5155.     rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->mem_base), 1);
  5156.     if (rc)
  5157.         pr_err("spmi read failed: addr=%03X, rc=%d\n",
  5158.                 INT_RT_STS(chip->mem_base), rc);
  5159.     else
  5160.         pr_info("memif rt_sts: 0x%x\n", rt_sts);
  5161.  
  5162.     rc = fg_mem_read(chip, buffer, SRAM_DUMP_START, SRAM_DUMP_LEN, 0, 0);
  5163.     if (rc) {
  5164.         pr_err("dump failed: rc = %d\n", rc);
  5165.         return;
  5166.     }
  5167.  
  5168.     for (i = 0; i < SRAM_DUMP_LEN; i += 4) {
  5169.         str[0] = '\0';
  5170.         fill_string(str, DEBUG_PRINT_BUFFER_SIZE, buffer + i, 4);
  5171.         pr_info("%03X %s\n", SRAM_DUMP_START + i, str);
  5172.     }
  5173.     devm_kfree(chip->dev, buffer);
  5174. }
  5175.  
  5176. #define MAXRSCHANGE_REG     0x434
  5177. #define ESR_VALUE_OFFSET    1
  5178. #define ESR_STRICT_VALUE    0x4120391F391F3019
  5179. #define ESR_DEFAULT_VALUE   0x58CD4A6761C34A67
  5180. static void update_esr_value(struct work_struct *work)
  5181. {
  5182.     union power_supply_propval prop = {0, };
  5183.     u64 esr_value = 0;
  5184.     u64 esr_readback = 0;
  5185.     int rc = 0;
  5186.     struct fg_chip *chip = container_of(work,
  5187.                 struct fg_chip,
  5188.                 update_esr_work);
  5189.  
  5190.     if (!is_charger_available(chip))
  5191.         return;
  5192.  
  5193.     power_supply_get_property(chip->batt_psy,
  5194.             POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
  5195.  
  5196.     rc = fg_mem_read(chip, (u8 *)&esr_readback, MAXRSCHANGE_REG, 8,
  5197.             ESR_VALUE_OFFSET, 0);
  5198.     if (rc)
  5199.         pr_err("read esr failed: rc = %d\n", rc);
  5200.  
  5201.     if ((prop.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER &&
  5202.             chip->status == POWER_SUPPLY_STATUS_CHARGING) ||
  5203.         (chip->status == POWER_SUPPLY_STATUS_FULL)) {
  5204.         if (esr_readback != ESR_STRICT_VALUE) {
  5205.             esr_value = ESR_STRICT_VALUE;
  5206.             rc = fg_mem_write(chip, (u8 *)&esr_value,
  5207.                 MAXRSCHANGE_REG, 8, ESR_VALUE_OFFSET, 0);
  5208.             if (rc)
  5209.                 pr_err("write strict ESR value rc=%d\n", rc);
  5210.             else {
  5211.                 chip->esr_strict_filter = true;
  5212.                 pr_info("ESR: update esr from 0x%llx to 0x%llx\n",
  5213.                         esr_readback, esr_value);
  5214.             }
  5215.         }
  5216.     } else if ((prop.intval != POWER_SUPPLY_CHARGE_TYPE_TAPER &&
  5217.             chip->status == POWER_SUPPLY_STATUS_CHARGING) ||
  5218.         (chip->status == POWER_SUPPLY_STATUS_DISCHARGING)) {
  5219.         if (esr_readback != ESR_DEFAULT_VALUE) {
  5220.             esr_value = ESR_DEFAULT_VALUE;
  5221.             rc = fg_mem_write(chip, (u8 *)&esr_value,
  5222.                 MAXRSCHANGE_REG, 8, ESR_VALUE_OFFSET, 0);
  5223.             if (rc)
  5224.                 pr_err("write default ESR rc=%d\n", rc);
  5225.             else {
  5226.                 chip->esr_strict_filter = false;
  5227.                 pr_info("ESR: update esr from 0x%llx to 0x%llx\n",
  5228.                         esr_readback, esr_value);
  5229.             }
  5230.         }
  5231.     }
  5232. }
  5233.  
#define TEMP_COUNTER_REG    0x580
#define VBAT_FILTERED_OFFSET    1
#define GAIN_REG        0x424
#define GAIN_OFFSET     1
#define K_VCOR_REG      0x484
#define DEF_GAIN_OFFSET     2
/* PICO_UNIT = 1e12, ATTO_UNIT = 1e18: fixed-point scale factors */
#define PICO_UNIT       0xE8D4A51000LL
#define ATTO_UNIT       0xDE0B6B3A7640000LL
#define VBAT_REF        3800000

/*
 * IADC Gain compensation steps:
 * If Input/OTG absent:
 *  - read VBAT_FILTERED, KVCOR, GAIN
 *  - calculate the gain compensation using following formula:
 *    gain = (1 + gain) * (1 + kvcor * (vbat_filtered - 3800000)) - 1;
 * else
 *  - reset to the default gain compensation
 */
static void iadc_gain_comp_work(struct work_struct *work)
{
    u8 reg[4];
    int rc;
    uint64_t vbat_filtered;
    int64_t gain, kvcor, temp, numerator;
    struct fg_chip *chip = container_of(work, struct fg_chip,
                            gain_comp_work);
    bool input_present = is_input_present(chip);
    bool otg_present = is_otg_present(chip);

    /* FG not initialized yet: just drop the wakeup source */
    if (!chip->init_done)
        goto done;

    if (!input_present && !otg_present) {
        /* read VBAT_FILTERED */
        rc = fg_mem_read(chip, reg, TEMP_COUNTER_REG, 3,
                        VBAT_FILTERED_OFFSET, 0);
        if (rc) {
            pr_err("Failed to read VBAT: rc=%d\n", rc);
            goto done;
        }
        /* assemble 24-bit little-endian raw sample, then scale to uV */
        temp = (reg[2] << 16) | (reg[1] << 8) | reg[0];
        vbat_filtered = div_u64((u64)temp * LSB_24B_NUMRTR,
                        LSB_24B_DENMTR);

        /* read K_VCOR */
        rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, 0, 0);
        if (rc) {
            pr_err("Failed to KVCOR rc=%d\n", rc);
            goto done;
        }
        kvcor = half_float(reg);

        /*
         * calculate gain
         * Integer form of the formula in the header comment: terms are
         * scaled so the final division by PICO_UNIT yields the gain in
         * the same fixed-point units as dfl_gain.
         */
        numerator = (MICRO_UNIT + chip->iadc_comp_data.dfl_gain)
            * (PICO_UNIT + kvcor * (vbat_filtered - VBAT_REF))
            - ATTO_UNIT;
        gain = div64_s64(numerator, PICO_UNIT);

        /* write back gain */
        half_float_to_buffer(gain, reg);
        rc = fg_mem_write(chip, reg, GAIN_REG, 2, GAIN_OFFSET, 0);
        if (rc) {
            pr_err("Failed to write gain reg rc=%d\n", rc);
            goto done;
        }

        if (fg_debug_mask & FG_STATUS)
            pr_info("IADC gain update [%x %x]\n", reg[1], reg[0]);
        chip->iadc_comp_data.gain_active = true;
    } else {
        /* reset gain register to the saved default */
        rc = fg_mem_write(chip, chip->iadc_comp_data.dfl_gain_reg,
                        GAIN_REG, 2, GAIN_OFFSET, 0);
        if (rc) {
            pr_err("unable to write gain comp: %d\n", rc);
            goto done;
        }

        if (fg_debug_mask & FG_STATUS)
            pr_info("IADC gain reset [%x %x]\n",
                    chip->iadc_comp_data.dfl_gain_reg[1],
                    chip->iadc_comp_data.dfl_gain_reg[0]);
        chip->iadc_comp_data.gain_active = false;
    }

done:
    fg_relax(&chip->gain_comp_wakeup_source);
}
  5323.  
  5324. static void cc_soc_store_work(struct work_struct *work)
  5325. {
  5326.     struct fg_chip *chip = container_of(work, struct fg_chip,
  5327.                     cc_soc_store_work);
  5328.     int cc_soc_pct;
  5329.  
  5330.     if (!chip->nom_cap_uah) {
  5331.         pr_err("nom_cap_uah zero!\n");
  5332.         fg_relax(&chip->cc_soc_wakeup_source);
  5333.         return;
  5334.     }
  5335.  
  5336.     cc_soc_pct = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
  5337.     cc_soc_pct = div64_s64(cc_soc_pct * 100,
  5338.                 chip->nom_cap_uah);
  5339.     chip->last_cc_soc = div64_s64((int64_t)chip->last_soc *
  5340.                 FULL_PERCENT_28BIT, FULL_SOC_RAW);
  5341.  
  5342.     if (fg_debug_mask & FG_STATUS)
  5343.         pr_info("cc_soc_pct: %d last_cc_soc: %lld\n", cc_soc_pct,
  5344.             chip->last_cc_soc);
  5345.  
  5346.     if (fg_reset_on_lockup && (chip->cc_soc_limit_pct > 0 &&
  5347.             cc_soc_pct >= chip->cc_soc_limit_pct)) {
  5348.         pr_err("CC_SOC out of range\n");
  5349.         fg_check_ima_error_handling(chip);
  5350.     }
  5351.  
  5352.     fg_relax(&chip->cc_soc_wakeup_source);
  5353. }
  5354.  
#define HARD_JEITA_ALARM_CHECK_NS   10000000000
/*
 * fg_hard_jeita_alarm_cb() - periodic (10 s) alarm used to poll the
 * hard JEITA comparator status while USB is present.
 *
 * Reads the hard hot/cold RT status bits and, on a transition relative
 * to the cached chip state, moves the battery between the soft and
 * hard JEITA states (cool<->cold, warm<->hot) and pushes the new
 * health to the battery power-supply. The alarm self-restarts until
 * USB is removed.
 */
static enum alarmtimer_restart fg_hard_jeita_alarm_cb(struct alarm *alarm,
                        ktime_t now)
{
    struct fg_chip *chip = container_of(alarm,
            struct fg_chip, hard_jeita_alarm);
    int rc, health = POWER_SUPPLY_HEALTH_UNKNOWN;
    u8 regval;
    bool batt_hot, batt_cold;
    union power_supply_propval val = {0, };

    /* Polling only matters while charging from USB */
    if (!is_usb_present(chip)) {
        pr_debug("USB plugged out, stop the timer!\n");
        return ALARMTIMER_NORESTART;
    }

    rc = fg_read(chip, &regval, BATT_INFO_STS(chip->batt_base), 1);
    if (rc) {
        pr_err("read batt_sts failed, rc=%d\n", rc);
        goto recheck;
    }

    batt_hot = !!(regval & JEITA_HARD_HOT_RT_STS);
    batt_cold = !!(regval & JEITA_HARD_COLD_RT_STS);
    /* Both bits set is an inconsistent hardware reading; retry later */
    if (batt_hot && batt_cold) {
        pr_debug("Hot && cold can't co-exist\n");
        goto recheck;
    }

    if ((batt_hot == chip->batt_hot) && (batt_cold == chip->batt_cold)) {
        pr_debug("battery JEITA state not changed, ignore\n");
        goto recheck;
    }

    if (batt_cold != chip->batt_cold) {
        /* cool --> cold */
        if (chip->batt_cool) {
            chip->batt_cool = false;
            chip->batt_cold = true;
            health = POWER_SUPPLY_HEALTH_COLD;
        } else if (chip->batt_cold) { /* cold --> cool */
            chip->batt_cool = true;
            chip->batt_cold = false;
            health = POWER_SUPPLY_HEALTH_COOL;
        }
    }

    if (batt_hot != chip->batt_hot) {
        /* warm --> hot */
        if (chip->batt_warm) {
            chip->batt_warm = false;
            chip->batt_hot = true;
            health = POWER_SUPPLY_HEALTH_OVERHEAT;
        } else if (chip->batt_hot) { /* hot --> warm */
            chip->batt_hot = false;
            chip->batt_warm = true;
            health = POWER_SUPPLY_HEALTH_WARM;
        }
    }

    /* Only notify the charger when a transition actually happened */
    if (health != POWER_SUPPLY_HEALTH_UNKNOWN) {
        pr_debug("FG report battery health: %d\n", health);
        val.intval = health;
        rc = power_supply_set_property(chip->batt_psy,
                POWER_SUPPLY_PROP_HEALTH, &val);
        if (rc)
            pr_err("Set batt_psy health: %d failed\n", health);
    }

recheck:
    alarm_forward_now(alarm, ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
    return ALARMTIMER_RESTART;
}
  5428.  
  5429. #define BATT_SOFT_COLD_STS  BIT(0)
  5430. #define BATT_SOFT_HOT_STS   BIT(1)
  5431. static irqreturn_t fg_jeita_soft_hot_irq_handler(int irq, void *_chip)
  5432. {
  5433.     int rc;
  5434.     struct fg_chip *chip = _chip;
  5435.     u8 regval;
  5436.     bool batt_warm;
  5437.     union power_supply_propval val = {0, };
  5438.  
  5439.     if (!is_charger_available(chip))
  5440.         return IRQ_HANDLED;
  5441.  
  5442.     rc = fg_read(chip, &regval, INT_RT_STS(chip->batt_base), 1);
  5443.     if (rc) {
  5444.         pr_err("spmi read failed: addr=%03X, rc=%d\n",
  5445.                 INT_RT_STS(chip->batt_base), rc);
  5446.         return IRQ_HANDLED;
  5447.     }
  5448.  
  5449.     batt_warm = !!(regval & BATT_SOFT_HOT_STS);
  5450.     if (chip->batt_warm == batt_warm) {
  5451.         pr_debug("warm state not change, ignore!\n");
  5452.         return IRQ_HANDLED;
  5453.     }
  5454.  
  5455.     chip->batt_warm = batt_warm;
  5456.     if (batt_warm) {
  5457.         val.intval = POWER_SUPPLY_HEALTH_WARM;
  5458.         power_supply_set_property(chip->batt_psy,
  5459.             POWER_SUPPLY_PROP_HEALTH, &val);
  5460.         /* kick the alarm timer for hard hot polling */
  5461.         rc = alarm_start_relative(&chip->hard_jeita_alarm,
  5462.                 ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
  5463.         if (rc)
  5464.             pr_err("start alarm for hard HOT detection failed, rc=%d\n",
  5465.                                     rc);
  5466.     } else {
  5467.         val.intval = POWER_SUPPLY_HEALTH_GOOD;
  5468.         power_supply_set_property(chip->batt_psy,
  5469.             POWER_SUPPLY_PROP_HEALTH, &val);
  5470.         /* cancel the alarm timer */
  5471.         alarm_try_to_cancel(&chip->hard_jeita_alarm);
  5472.     }
  5473.  
  5474.     return IRQ_HANDLED;
  5475. }
  5476.  
  5477. static irqreturn_t fg_jeita_soft_cold_irq_handler(int irq, void *_chip)
  5478. {
  5479.     int rc;
  5480.     struct fg_chip *chip = _chip;
  5481.     u8 regval;
  5482.     bool batt_cool;
  5483.     union power_supply_propval val = {0, };
  5484.  
  5485.     if (!is_charger_available(chip))
  5486.         return IRQ_HANDLED;
  5487.  
  5488.     rc = fg_read(chip, &regval, INT_RT_STS(chip->batt_base), 1);
  5489.     if (rc) {
  5490.         pr_err("spmi read failed: addr=%03X, rc=%d\n",
  5491.                 INT_RT_STS(chip->batt_base), rc);
  5492.         return IRQ_HANDLED;
  5493.     }
  5494.  
  5495.     batt_cool = !!(regval & BATT_SOFT_COLD_STS);
  5496.     if (chip->batt_cool == batt_cool) {
  5497.         pr_debug("cool state not change, ignore\n");
  5498.         return IRQ_HANDLED;
  5499.     }
  5500.  
  5501.     chip->batt_cool = batt_cool;
  5502.     if (batt_cool) {
  5503.         val.intval = POWER_SUPPLY_HEALTH_COOL;
  5504.         power_supply_set_property(chip->batt_psy,
  5505.             POWER_SUPPLY_PROP_HEALTH, &val);
  5506.         /* kick the alarm timer for hard cold polling */
  5507.         rc = alarm_start_relative(&chip->hard_jeita_alarm,
  5508.                 ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
  5509.         if (rc)
  5510.             pr_err("start alarm for hard COLD detection failed, rc=%d\n",
  5511.                                     rc);
  5512.     } else {
  5513.         val.intval = POWER_SUPPLY_HEALTH_GOOD;
  5514.         power_supply_set_property(chip->batt_psy,
  5515.             POWER_SUPPLY_PROP_HEALTH, &val);
  5516.         /* cancel the alarm timer */
  5517.         alarm_try_to_cancel(&chip->hard_jeita_alarm);
  5518.     }
  5519.  
  5520.     return IRQ_HANDLED;
  5521. }
  5522.  
  5523. #define SOC_FIRST_EST_DONE  BIT(5)
  5524. static bool is_first_est_done(struct fg_chip *chip)
  5525. {
  5526.     int rc;
  5527.     u8 fg_soc_sts;
  5528.  
  5529.     rc = fg_read(chip, &fg_soc_sts,
  5530.                  INT_RT_STS(chip->soc_base), 1);
  5531.     if (rc) {
  5532.         pr_err("spmi read failed: addr=%03X, rc=%d\n",
  5533.                 INT_RT_STS(chip->soc_base), rc);
  5534.         return false;
  5535.     }
  5536.  
  5537.     return (fg_soc_sts & SOC_FIRST_EST_DONE) ? true : false;
  5538. }
  5539.  
#define FG_EMPTY_DEBOUNCE_MS    1500
/*
 * fg_vbatt_low_handler() - vbatt-low comparator IRQ.
 *
 * Two modes:
 *  - use_vbat_low_empty_soc: the low-voltage comparator is the empty
 *    detector. On a genuine low reading the IRQ disables itself and
 *    schedules the debounced empty check; otherwise soc_empty is
 *    cleared.
 *  - normal: while charging, a recovered (no longer low) battery lets
 *    us disable the IRQ again; clients are then notified via the bms
 *    power-supply.
 */
static irqreturn_t fg_vbatt_low_handler(int irq, void *_chip)
{
    struct fg_chip *chip = _chip;
    bool vbatt_low_sts;

    if (fg_debug_mask & FG_IRQS)
        pr_info("vbatt-low triggered\n");

    /* handle empty soc based on vbatt-low interrupt */
    if (chip->use_vbat_low_empty_soc) {
        if (fg_get_vbatt_status(chip, &vbatt_low_sts))
            goto out;

        if (vbatt_low_sts) {
            if (fg_debug_mask & FG_IRQS)
                pr_info("Vbatt is low\n");
            /* one-shot: re-enabled elsewhere once handled */
            disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
            disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
            chip->vbat_low_irq_enabled = false;
            /* hold a wakeup source across the debounce window */
            fg_stay_awake(&chip->empty_check_wakeup_source);
            schedule_delayed_work(&chip->check_empty_work,
                msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
        } else {
            if (fg_debug_mask & FG_IRQS)
                pr_info("Vbatt is high\n");
            chip->soc_empty = false;
        }
        goto out;
    }

    if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
        if (fg_get_vbatt_status(chip, &vbatt_low_sts))
            goto out;
        if (!vbatt_low_sts && chip->vbat_low_irq_enabled) {
            if (fg_debug_mask & FG_IRQS)
                pr_info("disabling vbatt_low irq\n");
            disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
            disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
            chip->vbat_low_irq_enabled = false;
        }
    }
    if (chip->power_supply_registered)
        power_supply_changed(&chip->bms_psy);
out:
    return IRQ_HANDLED;
}
  5587.  
  5588. static irqreturn_t fg_batt_missing_irq_handler(int irq, void *_chip)
  5589. {
  5590.     struct fg_chip *chip = _chip;
  5591.     bool batt_missing = is_battery_missing(chip);
  5592.  
  5593.     if (batt_missing) {
  5594.         fg_cap_learning_stop(chip);
  5595.         chip->battery_missing = true;
  5596.         chip->profile_loaded = false;
  5597.         chip->soc_reporting_ready = false;
  5598.         chip->batt_type = default_batt_type;
  5599.         mutex_lock(&chip->cyc_ctr.lock);
  5600.         if (fg_debug_mask & FG_IRQS)
  5601.             pr_info("battery missing, clearing cycle counters\n");
  5602.         clear_cycle_counter(chip);
  5603.         mutex_unlock(&chip->cyc_ctr.lock);
  5604.     } else {
  5605.         if (!chip->use_otp_profile)
  5606.             fg_handle_battery_insertion(chip);
  5607.         else
  5608.             chip->battery_missing = false;
  5609.     }
  5610.  
  5611.     if (fg_debug_mask & FG_IRQS)
  5612.         pr_info("batt-missing triggered: %s\n",
  5613.                 batt_missing ? "missing" : "present");
  5614.  
  5615.     if (chip->power_supply_registered)
  5616.         power_supply_changed(&chip->bms_psy);
  5617.     return IRQ_HANDLED;
  5618. }
  5619.  
  5620. static irqreturn_t fg_mem_avail_irq_handler(int irq, void *_chip)
  5621. {
  5622.     struct fg_chip *chip = _chip;
  5623.     u8 mem_if_sts;
  5624.     int rc;
  5625.  
  5626.     rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
  5627.     if (rc) {
  5628.         pr_err("failed to read mem status rc=%d\n", rc);
  5629.         return IRQ_HANDLED;
  5630.     }
  5631.  
  5632.     if (fg_check_sram_access(chip)) {
  5633.         if ((fg_debug_mask & FG_IRQS)
  5634.                 & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
  5635.             pr_info("sram access granted\n");
  5636.         reinit_completion(&chip->sram_access_revoked);
  5637.         complete_all(&chip->sram_access_granted);
  5638.     } else {
  5639.         if ((fg_debug_mask & FG_IRQS)
  5640.                 & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
  5641.             pr_info("sram access revoked\n");
  5642.         complete_all(&chip->sram_access_revoked);
  5643.     }
  5644.  
  5645.     if (!rc && (fg_debug_mask & FG_IRQS)
  5646.             & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
  5647.         pr_info("mem_if sts 0x%02x\n", mem_if_sts);
  5648.  
  5649.     return IRQ_HANDLED;
  5650. }
  5651.  
  5652. static irqreturn_t fg_soc_irq_handler(int irq, void *_chip)
  5653. {
  5654.     struct fg_chip *chip = _chip;
  5655.     u8 soc_rt_sts;
  5656.     int rc, msoc;
  5657.  
  5658.     rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
  5659.     if (rc) {
  5660.         pr_err("spmi read failed: addr=%03X, rc=%d\n",
  5661.                 INT_RT_STS(chip->soc_base), rc);
  5662.     }
  5663.  
  5664.     if (fg_debug_mask & FG_IRQS)
  5665.         pr_info("triggered 0x%x\n", soc_rt_sts);
  5666.  
  5667.     rc = fg_check_system_config(chip);
  5668.     if (rc)
  5669.         pr_err("Failed to check system config rc=%d\n", rc);
  5670.  
  5671.     if (chip->dischg_gain.enable) {
  5672.         fg_stay_awake(&chip->dischg_gain_wakeup_source);
  5673.         schedule_work(&chip->dischg_gain_work);
  5674.     }
  5675.  
  5676.     if (chip->soc_slope_limiter_en) {
  5677.         fg_stay_awake(&chip->slope_limit_wakeup_source);
  5678.         schedule_work(&chip->slope_limiter_work);
  5679.     }
  5680.  
  5681.     /* Backup last soc every delta soc interrupt */
  5682.     chip->use_last_soc = false;
  5683.     if (fg_reset_on_lockup) {
  5684.         if (!chip->ima_error_handling)
  5685.             chip->last_soc = get_monotonic_soc_raw(chip);
  5686.         if (fg_debug_mask & FG_STATUS)
  5687.             pr_info("last_soc: %d\n", chip->last_soc);
  5688.  
  5689.         fg_stay_awake(&chip->cc_soc_wakeup_source);
  5690.         schedule_work(&chip->cc_soc_store_work);
  5691.     }
  5692.  
  5693.     if (chip->use_vbat_low_empty_soc) {
  5694.         msoc = get_monotonic_soc_raw(chip);
  5695.         if (msoc == 0 || chip->soc_empty) {
  5696.             fg_stay_awake(&chip->empty_check_wakeup_source);
  5697.             schedule_delayed_work(&chip->check_empty_work,
  5698.                 msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
  5699.         }
  5700.     }
  5701.  
  5702.     schedule_work(&chip->battery_age_work);
  5703.  
  5704.     if (chip->power_supply_registered)
  5705.         power_supply_changed(&chip->bms_psy);
  5706.  
  5707.     if (chip->rslow_comp.chg_rs_to_rslow > 0 &&
  5708.             chip->rslow_comp.chg_rslow_comp_c1 > 0 &&
  5709.             chip->rslow_comp.chg_rslow_comp_c2 > 0)
  5710.         schedule_work(&chip->rslow_comp_work);
  5711.  
  5712.     if (chip->cyc_ctr.en)
  5713.         schedule_work(&chip->cycle_count_work);
  5714.  
  5715.     schedule_work(&chip->update_esr_work);
  5716.  
  5717.     if (chip->charge_full)
  5718.         schedule_work(&chip->charge_full_work);
  5719.  
  5720.     if (chip->wa_flag & IADC_GAIN_COMP_WA
  5721.             && chip->iadc_comp_data.gain_active) {
  5722.         fg_stay_awake(&chip->gain_comp_wakeup_source);
  5723.         schedule_work(&chip->gain_comp_work);
  5724.     }
  5725.  
  5726.     if (chip->wa_flag & USE_CC_SOC_REG
  5727.             && chip->learning_data.active) {
  5728.         fg_stay_awake(&chip->capacity_learning_wakeup_source);
  5729.         schedule_work(&chip->fg_cap_learning_work);
  5730.     }
  5731.  
  5732.     if (chip->esr_pulse_tune_en) {
  5733.         fg_stay_awake(&chip->esr_extract_wakeup_source);
  5734.         schedule_work(&chip->esr_extract_config_work);
  5735.     }
  5736.  
  5737.     return IRQ_HANDLED;
  5738. }
  5739.  
  5740. static irqreturn_t fg_empty_soc_irq_handler(int irq, void *_chip)
  5741. {
  5742.     struct fg_chip *chip = _chip;
  5743.     u8 soc_rt_sts;
  5744.     int rc;
  5745.  
  5746.     rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
  5747.     if (rc) {
  5748.         pr_err("spmi read failed: addr=%03X, rc=%d\n",
  5749.                 INT_RT_STS(chip->soc_base), rc);
  5750.         goto done;
  5751.     }
  5752.  
  5753.     pr_info("triggered 0x%x\n", soc_rt_sts);
  5754.  
  5755.     if (soc_rt_sts & SOC_EMPTY) {
  5756.         chip->soc_empty = true;
  5757.         fg_stay_awake(&chip->empty_check_wakeup_source);
  5758.         schedule_delayed_work(&chip->check_empty_work,
  5759.             msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
  5760.     } else {
  5761.         chip->soc_empty = false;
  5762.         fg_relax(&chip->empty_check_wakeup_source);
  5763.     }
  5764.  
  5765. done:
  5766.     return IRQ_HANDLED;
  5767. }
  5768.  
  5769. static irqreturn_t fg_first_soc_irq_handler(int irq, void *_chip)
  5770. {
  5771.     struct fg_chip *chip = _chip;
  5772.  
  5773.     if (fg_debug_mask & FG_IRQS)
  5774.         pr_info("triggered\n");
  5775.  
  5776.     if (fg_est_dump)
  5777.         schedule_work(&chip->dump_sram);
  5778.  
  5779.     if (chip->power_supply_registered)
  5780.         power_supply_changed(&chip->bms_psy);
  5781.  
  5782.     complete_all(&chip->first_soc_done);
  5783.  
  5784.     return IRQ_HANDLED;
  5785. }
  5786.  
  5787. static void fg_external_power_changed(struct power_supply *psy)
  5788. {
  5789.     struct fg_chip *chip = container_of(psy, struct fg_chip, bms_psy);
  5790.     bool input_present = is_input_present(chip);
  5791.  
  5792.     if (input_present ^ chip->rslow_comp.active &&
  5793.             chip->rslow_comp.chg_rs_to_rslow > 0 &&
  5794.             chip->rslow_comp.chg_rslow_comp_c1 > 0 &&
  5795.             chip->rslow_comp.chg_rslow_comp_c2 > 0)
  5796.         schedule_work(&chip->rslow_comp_work);
  5797.     if (!input_present && chip->resume_soc_lowered) {
  5798.         fg_stay_awake(&chip->resume_soc_wakeup_source);
  5799.         schedule_work(&chip->set_resume_soc_work);
  5800.     }
  5801.     if (!input_present && chip->charge_full)
  5802.         schedule_work(&chip->charge_full_work);
  5803. }
  5804.  
/*
 * set_resume_soc_work() - adjust the FG auto-resume-charging SOC
 * threshold around a full charge.
 *
 * After charge termination with input still present, the threshold is
 * lowered (relative to the current monotonic SOC) so charging resumes
 * before the reported capacity visibly drops. Once input is removed or
 * health returns to GOOD, the configured default threshold is
 * restored. Always releases the resume_soc wakeup source.
 */
static void set_resume_soc_work(struct work_struct *work)
{
    struct fg_chip *chip = container_of(work,
                struct fg_chip,
                set_resume_soc_work);
    int rc, resume_soc_raw;

    if (is_input_present(chip) && !chip->resume_soc_lowered) {
        /* only lower the threshold once charging has terminated */
        if (!chip->charge_done)
            goto done;
        /* offset below current SOC by (0xFF - configured value) */
        resume_soc_raw = get_monotonic_soc_raw(chip)
            - (0xFF - settings[FG_MEM_RESUME_SOC].value);
        if (resume_soc_raw > 0 && resume_soc_raw < FULL_SOC_RAW) {
            rc = fg_set_resume_soc(chip, resume_soc_raw);
            if (rc) {
                pr_err("Couldn't set resume SOC for FG\n");
                goto done;
            }
            if (fg_debug_mask & FG_STATUS) {
                pr_info("resume soc lowered to 0x%02x\n",
                        resume_soc_raw);
            }
        } else if (settings[FG_MEM_RESUME_SOC].value > 0) {
            pr_err("bad resume soc 0x%02x\n", resume_soc_raw);
        }
        chip->charge_done = false;
        chip->resume_soc_lowered = true;
    } else if (chip->resume_soc_lowered && (!is_input_present(chip)
                || chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
        /* restore the configured default threshold */
        resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
        if (resume_soc_raw > 0 && resume_soc_raw < FULL_SOC_RAW) {
            rc = fg_set_resume_soc(chip, resume_soc_raw);
            if (rc) {
                pr_err("Couldn't set resume SOC for FG\n");
                goto done;
            }
            if (fg_debug_mask & FG_STATUS) {
                pr_info("resume soc set to 0x%02x\n",
                        resume_soc_raw);
            }
        } else if (settings[FG_MEM_RESUME_SOC].value > 0) {
            pr_err("bad resume soc 0x%02x\n", resume_soc_raw);
        }
        chip->resume_soc_lowered = false;
    }
done:
    fg_relax(&chip->resume_soc_wakeup_source);
}
  5853.  
  5854. #define OCV_COEFFS_START_REG        0x4C0
  5855. #define OCV_JUNCTION_REG        0x4D8
  5856. #define CUTOFF_VOLTAGE_REG      0x40C
  5857. #define RSLOW_CFG_REG           0x538
  5858. #define RSLOW_CFG_OFFSET        2
  5859. #define RSLOW_THRESH_REG        0x52C
  5860. #define RSLOW_THRESH_OFFSET     0
  5861. #define RS_TO_RSLOW_CHG_OFFSET      2
  5862. #define RS_TO_RSLOW_DISCHG_OFFSET   0
  5863. #define RSLOW_COMP_REG          0x528
  5864. #define RSLOW_COMP_C1_OFFSET        0
  5865. #define RSLOW_COMP_C2_OFFSET        2
  5866. #define BATT_PROFILE_OFFSET     0x4C0
  5867. static void get_default_rslow_comp_settings(struct fg_chip *chip)
  5868. {
  5869.     int offset;
  5870.  
  5871.     offset = RSLOW_CFG_REG + RSLOW_CFG_OFFSET - BATT_PROFILE_OFFSET;
  5872.     memcpy(&chip->rslow_comp.rslow_cfg, chip->batt_profile + offset, 1);
  5873.  
  5874.     offset = RSLOW_THRESH_REG + RSLOW_THRESH_OFFSET - BATT_PROFILE_OFFSET;
  5875.     memcpy(&chip->rslow_comp.rslow_thr, chip->batt_profile + offset, 1);
  5876.  
  5877.     offset = TEMP_RS_TO_RSLOW_REG + RS_TO_RSLOW_CHG_OFFSET -
  5878.         BATT_PROFILE_OFFSET;
  5879.     memcpy(&chip->rslow_comp.rs_to_rslow, chip->batt_profile + offset, 2);
  5880.  
  5881.     offset = RSLOW_COMP_REG + RSLOW_COMP_C1_OFFSET - BATT_PROFILE_OFFSET;
  5882.     memcpy(&chip->rslow_comp.rslow_comp, chip->batt_profile + offset, 4);
  5883. }
  5884.  
  5885. static int populate_system_data(struct fg_chip *chip)
  5886. {
  5887.     u8 buffer[24];
  5888.     int rc, i;
  5889.  
  5890.     fg_mem_lock(chip);
  5891.     rc = fg_mem_read(chip, buffer, OCV_COEFFS_START_REG, 24, 0, 0);
  5892.     if (rc) {
  5893.         pr_err("Failed to read ocv coefficients: %d\n", rc);
  5894.         goto done;
  5895.     }
  5896.     for (i = 0; i < 12; i += 1)
  5897.         chip->ocv_coeffs[i] = half_float(buffer + (i * 2));
  5898.     if (fg_debug_mask & FG_AGING) {
  5899.         pr_info("coeffs1 = %lld %lld %lld %lld\n",
  5900.                 chip->ocv_coeffs[0], chip->ocv_coeffs[1],
  5901.                 chip->ocv_coeffs[2], chip->ocv_coeffs[3]);
  5902.         pr_info("coeffs2 = %lld %lld %lld %lld\n",
  5903.                 chip->ocv_coeffs[4], chip->ocv_coeffs[5],
  5904.                 chip->ocv_coeffs[6], chip->ocv_coeffs[7]);
  5905.         pr_info("coeffs3 = %lld %lld %lld %lld\n",
  5906.                 chip->ocv_coeffs[8], chip->ocv_coeffs[9],
  5907.                 chip->ocv_coeffs[10], chip->ocv_coeffs[11]);
  5908.     }
  5909.     rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 2, 0, 0);
  5910.     if (rc) {
  5911.         pr_err("Failed to read ocv junctions: %d\n", rc);
  5912.         goto done;
  5913.     }
  5914.  
  5915.     chip->ocv_junction_p1p2 = buffer[0] * 100 / 255;
  5916.     chip->ocv_junction_p2p3 = buffer[1] * 100 / 255;
  5917.  
  5918.     rc = load_battery_aging_data(chip);
  5919.     if (rc) {
  5920.         pr_err("Failed to load battery aging data, rc:%d\n", rc);
  5921.         goto done;
  5922.     }
  5923.  
  5924.     rc = fg_mem_read(chip, buffer, CUTOFF_VOLTAGE_REG, 2, 0, 0);
  5925.     if (rc) {
  5926.         pr_err("Failed to read cutoff voltage: %d\n", rc);
  5927.         goto done;
  5928.     }
  5929.  
  5930.     chip->cutoff_voltage = voltage_2b(buffer);
  5931.     if (fg_debug_mask & FG_AGING)
  5932.         pr_info("cutoff_voltage = %lld, nom_cap_uah = %d p1p2 = %d, p2p3 = %d\n",
  5933.                 chip->cutoff_voltage, chip->nom_cap_uah,
  5934.                 chip->ocv_junction_p1p2,
  5935.                 chip->ocv_junction_p2p3);
  5936.  
  5937.     get_default_rslow_comp_settings(chip);
  5938. done:
  5939.     fg_mem_release(chip);
  5940.     return rc;
  5941. }
  5942.  
  5943. static int fg_update_batt_rslow_settings(struct fg_chip *chip)
  5944. {
  5945.     int64_t rs_to_rslow_chg, rs_to_rslow_dischg, batt_esr, rconn_uohm;
  5946.     u8 buffer[2];
  5947.     int rc;
  5948.  
  5949.     rc = fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, ESR_OFFSET, 0);
  5950.     if (rc) {
  5951.         pr_err("unable to read battery_esr: %d\n", rc);
  5952.         goto done;
  5953.     }
  5954.     batt_esr = half_float(buffer);
  5955.  
  5956.     rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
  5957.             RS_TO_RSLOW_DISCHG_OFFSET, 0);
  5958.     if (rc) {
  5959.         pr_err("unable to read rs to rslow dischg: %d\n", rc);
  5960.         goto done;
  5961.     }
  5962.     rs_to_rslow_dischg = half_float(buffer);
  5963.  
  5964.     rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
  5965.             RS_TO_RSLOW_CHG_OFFSET, 0);
  5966.     if (rc) {
  5967.         pr_err("unable to read rs to rslow chg: %d\n", rc);
  5968.         goto done;
  5969.     }
  5970.     rs_to_rslow_chg = half_float(buffer);
  5971.  
  5972.     if (fg_debug_mask & FG_STATUS)
  5973.         pr_info("rs_rslow_chg: %lld, rs_rslow_dischg: %lld, esr: %lld\n",
  5974.             rs_to_rslow_chg, rs_to_rslow_dischg, batt_esr);
  5975.  
  5976.     rconn_uohm = chip->rconn_mohm * 1000;
  5977.     rs_to_rslow_dischg = div64_s64(rs_to_rslow_dischg * batt_esr,
  5978.                     batt_esr + rconn_uohm);
  5979.     rs_to_rslow_chg = div64_s64(rs_to_rslow_chg * batt_esr,
  5980.                     batt_esr + rconn_uohm);
  5981.  
  5982.     half_float_to_buffer(rs_to_rslow_chg, buffer);
  5983.     rc = fg_mem_write(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
  5984.             RS_TO_RSLOW_CHG_OFFSET, 0);
  5985.     if (rc) {
  5986.         pr_err("unable to write rs_to_rslow_chg: %d\n", rc);
  5987.         goto done;
  5988.     }
  5989.  
  5990.     half_float_to_buffer(rs_to_rslow_dischg, buffer);
  5991.     rc = fg_mem_write(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
  5992.             RS_TO_RSLOW_DISCHG_OFFSET, 0);
  5993.     if (rc) {
  5994.         pr_err("unable to write rs_to_rslow_dischg: %d\n", rc);
  5995.         goto done;
  5996.     }
  5997.  
  5998.     if (fg_debug_mask & FG_STATUS)
  5999.         pr_info("Modified rs_rslow_chg: %lld, rs_rslow_dischg: %lld\n",
  6000.             rs_to_rslow_chg, rs_to_rslow_dischg);
  6001. done:
  6002.     return rc;
  6003. }
  6004.  
  6005. #define RSLOW_CFG_MASK      (BIT(2) | BIT(3) | BIT(4) | BIT(5))
  6006. #define RSLOW_CFG_ON_VAL    (BIT(2) | BIT(3))
  6007. #define RSLOW_THRESH_FULL_VAL   0xFF
  6008. static int fg_rslow_charge_comp_set(struct fg_chip *chip)
  6009. {
  6010.     int rc;
  6011.     u8 buffer[2];
  6012.  
  6013.     mutex_lock(&chip->rslow_comp.lock);
  6014.     fg_mem_lock(chip);
  6015.  
  6016.     rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
  6017.             RSLOW_CFG_MASK, RSLOW_CFG_ON_VAL, RSLOW_CFG_OFFSET);
  6018.     if (rc) {
  6019.         pr_err("unable to write rslow cfg: %d\n", rc);
  6020.         goto done;
  6021.     }
  6022.     rc = fg_mem_masked_write(chip, RSLOW_THRESH_REG,
  6023.             0xFF, RSLOW_THRESH_FULL_VAL, RSLOW_THRESH_OFFSET);
  6024.     if (rc) {
  6025.         pr_err("unable to write rslow thresh: %d\n", rc);
  6026.         goto done;
  6027.     }
  6028.  
  6029.     half_float_to_buffer(chip->rslow_comp.chg_rs_to_rslow, buffer);
  6030.     rc = fg_mem_write(chip, buffer,
  6031.             TEMP_RS_TO_RSLOW_REG, 2, RS_TO_RSLOW_CHG_OFFSET, 0);
  6032.     if (rc) {
  6033.         pr_err("unable to write rs to rslow: %d\n", rc);
  6034.         goto done;
  6035.     }
  6036.     half_float_to_buffer(chip->rslow_comp.chg_rslow_comp_c1, buffer);
  6037.     rc = fg_mem_write(chip, buffer,
  6038.             RSLOW_COMP_REG, 2, RSLOW_COMP_C1_OFFSET, 0);
  6039.     if (rc) {
  6040.         pr_err("unable to write rslow comp: %d\n", rc);
  6041.         goto done;
  6042.     }
  6043.     half_float_to_buffer(chip->rslow_comp.chg_rslow_comp_c2, buffer);
  6044.     rc = fg_mem_write(chip, buffer,
  6045.             RSLOW_COMP_REG, 2, RSLOW_COMP_C2_OFFSET, 0);
  6046.     if (rc) {
  6047.         pr_err("unable to write rslow comp: %d\n", rc);
  6048.         goto done;
  6049.     }
  6050.     chip->rslow_comp.active = true;
  6051.     if (fg_debug_mask & FG_STATUS)
  6052.         pr_info("Activated rslow charge comp values\n");
  6053.  
  6054. done:
  6055.     fg_mem_release(chip);
  6056.     mutex_unlock(&chip->rslow_comp.lock);
  6057.     return rc;
  6058. }
  6059.  
#define RSLOW_CFG_ORIG_MASK (BIT(4) | BIT(5))
/*
 * fg_rslow_charge_comp_clear - deactivate rslow charge compensation
 *
 * Restores the default rslow configuration, threshold, rs_to_rslow and
 * compensation constants previously captured from the battery profile by
 * get_default_rslow_comp_settings().  Mirrors fg_rslow_charge_comp_set()
 * and is serialized against it via rslow_comp.lock.
 */
static int fg_rslow_charge_comp_clear(struct fg_chip *chip)
{
    u8 reg;
    int rc;

    mutex_lock(&chip->rslow_comp.lock);
    fg_mem_lock(chip);

    /* keep only the bits preserved from the original profile cfg */
    reg = chip->rslow_comp.rslow_cfg & RSLOW_CFG_ORIG_MASK;
    rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
            RSLOW_CFG_MASK, reg, RSLOW_CFG_OFFSET);
    if (rc) {
        pr_err("unable to write rslow cfg: %d\n", rc);
        goto done;
    }
    rc = fg_mem_masked_write(chip, RSLOW_THRESH_REG,
            0xFF, chip->rslow_comp.rslow_thr, RSLOW_THRESH_OFFSET);
    if (rc) {
        pr_err("unable to write rslow thresh: %d\n", rc);
        goto done;
    }

    rc = fg_mem_write(chip, chip->rslow_comp.rs_to_rslow,
            TEMP_RS_TO_RSLOW_REG, 2, RS_TO_RSLOW_CHG_OFFSET, 0);
    if (rc) {
        pr_err("unable to write rs to rslow: %d\n", rc);
        goto done;
    }
    /* restore c1 and c2 together (4 bytes starting at C1) */
    rc = fg_mem_write(chip, chip->rslow_comp.rslow_comp,
            RSLOW_COMP_REG, 4, RSLOW_COMP_C1_OFFSET, 0);
    if (rc) {
        pr_err("unable to write rslow comp: %d\n", rc);
        goto done;
    }
    chip->rslow_comp.active = false;
    if (fg_debug_mask & FG_STATUS)
        pr_info("Cleared rslow charge comp values\n");

done:
    fg_mem_release(chip);
    mutex_unlock(&chip->rslow_comp.lock);
    return rc;
}
  6104.  
  6105. static void rslow_comp_work(struct work_struct *work)
  6106. {
  6107.     int battery_soc_1b;
  6108.     struct fg_chip *chip = container_of(work,
  6109.                 struct fg_chip,
  6110.                 rslow_comp_work);
  6111.  
  6112.     battery_soc_1b = get_battery_soc_raw(chip) >> 16;
  6113.     if (battery_soc_1b > chip->rslow_comp.chg_rslow_comp_thr
  6114.             && chip->status == POWER_SUPPLY_STATUS_CHARGING) {
  6115.         if (!chip->rslow_comp.active)
  6116.             fg_rslow_charge_comp_set(chip);
  6117.     } else {
  6118.         if (chip->rslow_comp.active)
  6119.             fg_rslow_charge_comp_clear(chip);
  6120.     }
  6121. }
  6122.  
#define MICROUNITS_TO_ADC_RAW(units)    \
            div64_s64(units * LSB_16B_DENMTR, LSB_16B_NUMRTR)
/*
 * update_chg_iterm - program the charge-termination current into FG SRAM
 *
 * Converts the configured termination current (mA) to the FG's 16-bit raw
 * ADC representation and writes it to the address from the settings table.
 * Returns the result of the SRAM write.
 */
static int update_chg_iterm(struct fg_chip *chip)
{
    u8 data[2];
    u16 converted_current_raw;
    /* negated: charging current is reported as negative by the FG */
    s64 current_ma = -settings[FG_MEM_CHG_TERM_CURRENT].value;

    /* mA -> uA before the raw conversion macro */
    converted_current_raw = (s16)MICROUNITS_TO_ADC_RAW(current_ma * 1000);
    /*
     * NOTE(review): cpu_to_le16() followed by manual byte-splitting is
     * meant to serialize the value little-endian for SRAM — TODO confirm
     * this is correct on big-endian builds.
     */
    data[0] = cpu_to_le16(converted_current_raw) & 0xFF;
    data[1] = cpu_to_le16(converted_current_raw) >> 8;

    if (fg_debug_mask & FG_STATUS)
        pr_info("current = %lld, converted_raw = %04x, data = %02x %02x\n",
            current_ma, converted_current_raw, data[0], data[1]);
    return fg_mem_write(chip, data,
            settings[FG_MEM_CHG_TERM_CURRENT].address,
            2, settings[FG_MEM_CHG_TERM_CURRENT].offset, 0);
}
  6142.  
  6143. #define CC_CV_SETPOINT_REG  0x4F8
  6144. #define CC_CV_SETPOINT_OFFSET   0
  6145. static void update_cc_cv_setpoint(struct fg_chip *chip)
  6146. {
  6147.     int rc;
  6148.     u8 tmp[2];
  6149.  
  6150.     if (!chip->cc_cv_threshold_mv)
  6151.         return;
  6152.     batt_to_setpoint_adc(chip->cc_cv_threshold_mv, tmp);
  6153.     rc = fg_mem_write(chip, tmp, CC_CV_SETPOINT_REG, 2,
  6154.                 CC_CV_SETPOINT_OFFSET, 0);
  6155.     if (rc) {
  6156.         pr_err("failed to write CC_CV_VOLT rc=%d\n", rc);
  6157.         return;
  6158.     }
  6159.     if (fg_debug_mask & FG_STATUS)
  6160.         pr_info("Wrote %x %x to address %x for CC_CV setpoint\n",
  6161.             tmp[0], tmp[1], CC_CV_SETPOINT_REG);
  6162. }
  6163.  
  6164. #define CBITS_INPUT_FILTER_REG      0x4B4
  6165. #define CBITS_RMEAS1_OFFSET     1
  6166. #define CBITS_RMEAS2_OFFSET     2
  6167. #define CBITS_RMEAS1_DEFAULT_VAL    0x65
  6168. #define CBITS_RMEAS2_DEFAULT_VAL    0x65
  6169. #define IMPTR_FAST_TIME_SHIFT       1
  6170. #define IMPTR_LONG_TIME_SHIFT       (1 << 4)
  6171. #define IMPTR_PULSE_CTR_CHG     1
  6172. #define IMPTR_PULSE_CTR_DISCHG      (1 << 4)
  6173. static int fg_config_imptr_pulse(struct fg_chip *chip, bool slow)
  6174. {
  6175.     int rc;
  6176.     u8 cntr[2] = {0, 0};
  6177.     u8 val;
  6178.  
  6179.     if (slow == chip->imptr_pulse_slow_en) {
  6180.         if (fg_debug_mask & FG_STATUS)
  6181.             pr_info("imptr_pulse_slow is %sabled already\n",
  6182.                 slow ? "en" : "dis");
  6183.         return 0;
  6184.     }
  6185.  
  6186.     fg_mem_lock(chip);
  6187.  
  6188.     val = slow ? (IMPTR_FAST_TIME_SHIFT | IMPTR_LONG_TIME_SHIFT) :
  6189.         CBITS_RMEAS1_DEFAULT_VAL;
  6190.     rc = fg_mem_write(chip, &val, CBITS_INPUT_FILTER_REG, 1,
  6191.             CBITS_RMEAS1_OFFSET, 0);
  6192.     if (rc) {
  6193.         pr_err("unable to write cbits_rmeas1_offset rc=%d\n", rc);
  6194.         goto done;
  6195.     }
  6196.  
  6197.     val = slow ? (IMPTR_PULSE_CTR_CHG | IMPTR_PULSE_CTR_DISCHG) :
  6198.         CBITS_RMEAS2_DEFAULT_VAL;
  6199.     rc = fg_mem_write(chip, &val, CBITS_INPUT_FILTER_REG, 1,
  6200.             CBITS_RMEAS2_OFFSET, 0);
  6201.     if (rc) {
  6202.         pr_err("unable to write cbits_rmeas2_offset rc=%d\n", rc);
  6203.         goto done;
  6204.     }
  6205.  
  6206.     if (slow) {
  6207.         rc = fg_mem_write(chip, cntr, COUNTER_IMPTR_REG, 4,
  6208.                 COUNTER_IMPTR_OFFSET, 0);
  6209.         if (rc) {
  6210.             pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
  6211.             goto done;
  6212.         }
  6213.  
  6214.         rc = fg_mem_write(chip, cntr, COUNTER_PULSE_REG, 2,
  6215.                 COUNTER_PULSE_OFFSET, 0);
  6216.         if (rc) {
  6217.             pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
  6218.             goto done;
  6219.         }
  6220.     }
  6221.  
  6222.     chip->imptr_pulse_slow_en = slow;
  6223.     if (fg_debug_mask & FG_STATUS)
  6224.         pr_info("imptr_pulse_slow is %sabled\n", slow ? "en" : "dis");
  6225. done:
  6226.     fg_mem_release(chip);
  6227.     return rc;
  6228. }
  6229.  
  6230. #define CURRENT_DELTA_MIN_REG       0x42C
  6231. #define CURRENT_DELTA_MIN_OFFSET    1
  6232. #define SYS_CFG_1_REG           0x4AC
  6233. #define SYS_CFG_1_OFFSET        0
  6234. #define CURRENT_DELTA_MIN_DEFAULT   0x16
  6235. #define CURRENT_DELTA_MIN_500MA     0xCD
  6236. #define RSLOW_CFG_USE_FIX_RSER_VAL  BIT(7)
  6237. #define ENABLE_ESR_PULSE_VAL        BIT(3)
  6238. static int fg_config_esr_extract(struct fg_chip *chip, bool disable)
  6239. {
  6240.     int rc;
  6241.     u8 val;
  6242.  
  6243.     if (disable == chip->esr_extract_disabled) {
  6244.         if (fg_debug_mask & FG_STATUS)
  6245.             pr_info("ESR extract already %sabled\n",
  6246.                 disable ? "dis" : "en");
  6247.         return 0;
  6248.     }
  6249.  
  6250.     fg_mem_lock(chip);
  6251.  
  6252.     val = disable ? CURRENT_DELTA_MIN_500MA :
  6253.                 CURRENT_DELTA_MIN_DEFAULT;
  6254.     rc = fg_mem_write(chip, &val, CURRENT_DELTA_MIN_REG, 1,
  6255.             CURRENT_DELTA_MIN_OFFSET, 0);
  6256.     if (rc) {
  6257.         pr_err("unable to write curr_delta_min rc=%d\n", rc);
  6258.         goto done;
  6259.     }
  6260.  
  6261.     val = disable ? RSLOW_CFG_USE_FIX_RSER_VAL : 0;
  6262.     rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
  6263.             RSLOW_CFG_USE_FIX_RSER_VAL, val, RSLOW_CFG_OFFSET);
  6264.     if (rc) {
  6265.         pr_err("unable to write rslow cfg rc= %d\n", rc);
  6266.         goto done;
  6267.     }
  6268.  
  6269.     val = disable ? 0 : ENABLE_ESR_PULSE_VAL;
  6270.     rc = fg_mem_masked_write(chip, SYS_CFG_1_REG,
  6271.             ENABLE_ESR_PULSE_VAL, val, SYS_CFG_1_OFFSET);
  6272.     if (rc) {
  6273.         pr_err("unable to write sys_cfg_1 rc= %d\n", rc);
  6274.         goto done;
  6275.     }
  6276.  
  6277.     chip->esr_extract_disabled = disable;
  6278.     if (fg_debug_mask & FG_STATUS)
  6279.         pr_info("ESR extract is %sabled\n", disable ? "dis" : "en");
  6280. done:
  6281.     fg_mem_release(chip);
  6282.     return rc;
  6283. }
  6284.  
  6285. #define ESR_EXTRACT_STOP_SOC        2
  6286. #define IMPTR_PULSE_CONFIG_SOC      5
  6287. static void esr_extract_config_work(struct work_struct *work)
  6288. {
  6289.     struct fg_chip *chip = container_of(work, struct fg_chip,
  6290.                         esr_extract_config_work);
  6291.     bool input_present = is_input_present(chip);
  6292.     int capacity = get_prop_capacity(chip);
  6293.  
  6294.     if (input_present && capacity <= ESR_EXTRACT_STOP_SOC) {
  6295.         fg_config_esr_extract(chip, true);
  6296.     } else if (capacity > ESR_EXTRACT_STOP_SOC) {
  6297.         fg_config_esr_extract(chip, false);
  6298.  
  6299.         if (capacity <= IMPTR_PULSE_CONFIG_SOC)
  6300.             fg_config_imptr_pulse(chip, true);
  6301.         else
  6302.             fg_config_imptr_pulse(chip, false);
  6303.     }
  6304.  
  6305.     fg_relax(&chip->esr_extract_wakeup_source);
  6306. }
  6307.  
  6308. #define KI_COEFF_MEDC_REG       0x400
  6309. #define KI_COEFF_MEDC_OFFSET        0
  6310. #define KI_COEFF_HIGHC_REG      0x404
  6311. #define KI_COEFF_HIGHC_OFFSET       0
  6312. #define DEFAULT_MEDC_VOLTAGE_GAIN   3
  6313. #define DEFAULT_HIGHC_VOLTAGE_GAIN  2
  6314. static void discharge_gain_work(struct work_struct *work)
  6315. {
  6316.     struct fg_chip *chip = container_of(work, struct fg_chip,
  6317.                         dischg_gain_work);
  6318.     u8 buf[2];
  6319.     int capacity, rc, i;
  6320.     int64_t medc_val = DEFAULT_MEDC_VOLTAGE_GAIN;
  6321.     int64_t highc_val = DEFAULT_HIGHC_VOLTAGE_GAIN;
  6322.  
  6323.     capacity = get_prop_capacity(chip);
  6324.     if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
  6325.         for (i = VOLT_GAIN_MAX - 1; i >= 0; i--) {
  6326.             if (capacity <= chip->dischg_gain.soc[i]) {
  6327.                 medc_val = chip->dischg_gain.medc_gain[i];
  6328.                 highc_val = chip->dischg_gain.highc_gain[i];
  6329.             }
  6330.         }
  6331.     }
  6332.  
  6333.     if (fg_debug_mask & FG_STATUS)
  6334.         pr_info("Capacity: %d, medc_gain: %lld highc_gain: %lld\n",
  6335.             capacity, medc_val, highc_val);
  6336.  
  6337.     medc_val *= MICRO_UNIT;
  6338.     half_float_to_buffer(medc_val, buf);
  6339.     rc = fg_mem_write(chip, buf, KI_COEFF_MEDC_REG, 2,
  6340.                 KI_COEFF_MEDC_OFFSET, 0);
  6341.     if (rc)
  6342.         pr_err("Couldn't write to ki_coeff_medc_reg, rc=%d\n", rc);
  6343.     else if (fg_debug_mask & FG_STATUS)
  6344.         pr_info("Value [%x %x] written to ki_coeff_medc\n", buf[0],
  6345.             buf[1]);
  6346.  
  6347.     highc_val *= MICRO_UNIT;
  6348.     half_float_to_buffer(highc_val, buf);
  6349.     rc = fg_mem_write(chip, buf, KI_COEFF_HIGHC_REG, 2,
  6350.                 KI_COEFF_HIGHC_OFFSET, 0);
  6351.     if (rc)
  6352.         pr_err("Couldn't write to ki_coeff_highc_reg, rc=%d\n", rc);
  6353.     else if (fg_debug_mask & FG_STATUS)
  6354.         pr_info("Value [%x %x] written to ki_coeff_highc\n", buf[0],
  6355.             buf[1]);
  6356.  
  6357.     fg_relax(&chip->dischg_gain_wakeup_source);
  6358. }
  6359.  
#define LOW_LATENCY         BIT(6)
#define PROFILE_INTEGRITY_REG       0x53C
#define PROFILE_INTEGRITY_BIT       BIT(0)
#define FIRST_EST_DONE_BIT      BIT(5)
#define MAX_TRIES_FIRST_EST     3
#define FIRST_EST_WAIT_MS       2000
#define PROFILE_LOAD_TIMEOUT_MS     5000
/*
 * fg_do_restart - restart the fuel gauge, optionally rewriting the
 * battery profile
 *
 * Sequence: (optionally) verify charging is disabled and battery current
 * direction is sane, save the SW-rbias battery temperature, reconfigure
 * SRAM access for low latency, (optionally) write the profile and its
 * integrity bit, trigger a first SOC estimate via SOC_RESTART, wait for
 * it to complete, restore temperature and re-enable charging.
 *
 * The exact order of register writes, sleeps and completion waits below
 * is hardware-mandated; do not reorder.  Returns 0 on success, -EINVAL
 * (or -ETIMEDOUT mapped to -EINVAL via the fail path) on failure.
 */
static int fg_do_restart(struct fg_chip *chip, bool write_profile)
{
    int rc, ibat_ua;
    u8 reg = 0;
    u8 buf[2];
    bool tried_once = false;

    if (fg_debug_mask & FG_STATUS)
        pr_info("Restarting FG...\n");

try_again:
    if (write_profile && !chip->ima_error_handling) {
        /* profile write requires charging to already be disabled */
        if (!chip->charging_disabled) {
            pr_err("Charging not yet disabled!\n");
            return -EINVAL;
        }

        ibat_ua = get_sram_prop_now(chip, FG_DATA_CURRENT);
        if (ibat_ua == -EINVAL) {
            pr_err("SRAM not updated yet!\n");
            return ibat_ua;
        }

        /*
         * Negative current implies charging; force one SRAM data
         * refresh and retry once before giving up on the check.
         */
        if (ibat_ua < 0) {
            pr_warn("Charging enabled?, ibat_ua: %d\n", ibat_ua);

            if (!tried_once) {
                cancel_delayed_work(&chip->update_sram_data);
                schedule_delayed_work(&chip->update_sram_data,
                    msecs_to_jiffies(0));
                msleep(1000);
                tried_once = true;
                goto try_again;
            }
        }
    }

    chip->fg_restarting = true;
    /*
     * save the temperature if the sw rbias control is active so that there
     * is no gap of time when there is no valid temperature read after the
     * restart
     */
    if (chip->sw_rbias_ctrl) {
        rc = fg_mem_read(chip, buf,
                fg_data[FG_DATA_BATT_TEMP].address,
                fg_data[FG_DATA_BATT_TEMP].len,
                fg_data[FG_DATA_BATT_TEMP].offset, 0);
        if (rc) {
            pr_err("failed to read batt temp rc=%d\n", rc);
            goto sub_and_fail;
        }
    }
    /*
     * release the sram access and configure the correct settings
     * before re-requesting access.
     */
    mutex_lock(&chip->rw_lock);
    fg_release_access(chip);

    rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
            NO_OTP_PROF_RELOAD, 0, 1);
    if (rc) {
        pr_err("failed to set no otp reload bit\n");
        goto unlock_and_fail;
    }

    /* unset the restart bits so the fg doesn't continuously restart */
    reg = REDO_FIRST_ESTIMATE | RESTART_GO;
    rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
            reg, 0, 1);
    if (rc) {
        pr_err("failed to unset fg restart: %d\n", rc);
        goto unlock_and_fail;
    }

    rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
            LOW_LATENCY, LOW_LATENCY, 1);
    if (rc) {
        pr_err("failed to set low latency access bit\n");
        goto unlock_and_fail;
    }
    mutex_unlock(&chip->rw_lock);

    /* read once to get a fg cycle in */
    rc = fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 0);
    if (rc) {
        pr_err("failed to read profile integrity rc=%d\n", rc);
        goto fail;
    }

    /*
     * If this is not the first time a profile has been loaded, sleep for
     * 3 seconds to make sure the NO_OTP_RELOAD is cleared in memory
     */
    if (chip->first_profile_loaded)
        msleep(3000);

    mutex_lock(&chip->rw_lock);
    fg_release_access(chip);
    rc = fg_masked_write(chip, MEM_INTF_CFG(chip), LOW_LATENCY, 0, 1);
    if (rc) {
        pr_err("failed to set low latency access bit\n");
        goto unlock_and_fail;
    }

    /* hold SRAM access across the profile write below */
    atomic_add_return(1, &chip->memif_user_cnt);
    mutex_unlock(&chip->rw_lock);

    if (write_profile) {
        /* write the battery profile */
        rc = fg_mem_write(chip, chip->batt_profile, BATT_PROFILE_OFFSET,
                chip->batt_profile_len, 0, 1);
        if (rc) {
            pr_err("failed to write profile rc=%d\n", rc);
            goto sub_and_fail;
        }
        /* write the integrity bits and release access */
        rc = fg_mem_masked_write(chip, PROFILE_INTEGRITY_REG,
                PROFILE_INTEGRITY_BIT,
                PROFILE_INTEGRITY_BIT, 0);
        if (rc) {
            pr_err("failed to write profile rc=%d\n", rc);
            goto sub_and_fail;
        }
    }

    /* decrement the user count so that memory access can be released */
    fg_release_access_if_necessary(chip);

    /*
     * make sure that the first estimate has completed
     * in case of a hotswap
     */
    rc = wait_for_completion_interruptible_timeout(&chip->first_soc_done,
            msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
    if (rc <= 0) {
        pr_err("transaction timed out rc=%d\n", rc);
        rc = -ETIMEDOUT;
        goto fail;
    }

    /*
     * reinitialize the completion so that the driver knows when the restart
     * finishes
     */
    reinit_completion(&chip->first_soc_done);

    if (chip->esr_pulse_tune_en) {
        fg_stay_awake(&chip->esr_extract_wakeup_source);
        schedule_work(&chip->esr_extract_config_work);
    }

    /*
     * set the restart bits so that the next fg cycle will not reload
     * the profile
     */
    rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
            NO_OTP_PROF_RELOAD, NO_OTP_PROF_RELOAD, 1);
    if (rc) {
        pr_err("failed to set no otp reload bit\n");
        goto fail;
    }

    /* kick off the restart / first estimate */
    reg = REDO_FIRST_ESTIMATE | RESTART_GO;
    rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
            reg, reg, 1);
    if (rc) {
        pr_err("failed to set fg restart: %d\n", rc);
        goto fail;
    }

    msleep(2000);

    /* wait for the first estimate to complete */
    rc = wait_for_completion_interruptible_timeout(&chip->first_soc_done,
            msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
    if (rc <= 0) {
        pr_err("transaction timed out rc=%d\n", rc);
        rc = -ETIMEDOUT;
        goto fail;
    }
    /* confirm via the interrupt status that the estimate finished */
    rc = fg_read(chip, &reg, INT_RT_STS(chip->soc_base), 1);
    if (rc) {
        pr_err("spmi read failed: addr=%03X, rc=%d\n",
                INT_RT_STS(chip->soc_base), rc);
        goto fail;
    }
    if ((reg & FIRST_EST_DONE_BIT) == 0)
        pr_err("Battery profile reloading failed, no first estimate\n");

    rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
            NO_OTP_PROF_RELOAD, 0, 1);
    if (rc) {
        pr_err("failed to set no otp reload bit\n");
        goto fail;
    }
    /* unset the restart bits so the fg doesn't continuously restart */
    reg = REDO_FIRST_ESTIMATE | RESTART_GO;
    rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
            reg, 0, 1);
    if (rc) {
        pr_err("failed to unset fg restart: %d\n", rc);
        goto fail;
    }

    /* restore the battery temperature reading here */
    if (chip->sw_rbias_ctrl) {
        if (fg_debug_mask & FG_STATUS)
            pr_info("reloaded 0x%02x%02x into batt temp",
                    buf[0], buf[1]);
        rc = fg_mem_write(chip, buf,
                fg_data[FG_DATA_BATT_TEMP].address,
                fg_data[FG_DATA_BATT_TEMP].len,
                fg_data[FG_DATA_BATT_TEMP].offset, 0);
        if (rc) {
            pr_err("failed to write batt temp rc=%d\n", rc);
            goto fail;
        }
    }

    /* Enable charging now as the first estimate is done now */
    if (chip->charging_disabled) {
        rc = set_prop_enable_charging(chip, true);
        if (rc)
            pr_err("Failed to enable charging, rc=%d\n", rc);
        else
            chip->charging_disabled = false;
    }

    chip->fg_restarting = false;

    pr_info("done!\n");
    return 0;

unlock_and_fail:
    mutex_unlock(&chip->rw_lock);
    goto fail;
sub_and_fail:
    fg_release_access_if_necessary(chip);
    goto fail;
fail:
    chip->fg_restarting = false;
    return -EINVAL;
}
  6612.  
#define FG_PROFILE_LEN          128
#define PROFILE_COMPARE_LEN     32
#define THERMAL_COEFF_ADDR      0x444
#define THERMAL_COEFF_OFFSET        0x2
#define BATTERY_PSY_WAIT_MS     2000
/*
 * fg_batt_profile_init() - load and (if needed) flash the battery profile.
 *
 * Waits for battery-id detection, picks the best-matching profile from the
 * "qcom,battery-data" device-tree node, and compares it against the profile
 * currently resident in FG SRAM.  If the SRAM profile is intact and the
 * battery state (vbat estimate, ESR, non-empty) is sane, the SRAM copy is
 * kept; otherwise the new profile is written and the FG is restarted.
 *
 * Charging is deliberately disabled for one FG cycle before the
 * vbat_in_range comparison (see the "goto update" path) and re-enabled on
 * every exit path that previously disabled it.
 *
 * Returns 0 on success or if init was rescheduled, negative errno otherwise.
 * Holds profile_wakeup_source for the duration; released on all exits.
 */
static int fg_batt_profile_init(struct fg_chip *chip)
{
    int rc = 0, ret;
    int len, batt_id;
    struct device_node *node = chip->spmi->dev.of_node;
    struct device_node *batt_node, *profile_node;
    const char *data, *batt_type_str;
    bool tried_again = false, vbat_in_range, profiles_same;
    bool esr_in_range;
    u8 reg = 0;

wait:
    fg_stay_awake(&chip->profile_wakeup_source);
    ret = wait_for_completion_interruptible_timeout(&chip->batt_id_avail,
            msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
    /* If we were interrupted wait again one more time. */
    if (ret == -ERESTARTSYS && !tried_again) {
        tried_again = true;
        pr_debug("interrupted, waiting again\n");
        goto wait;
    } else if (ret <= 0) {
        rc = -ETIMEDOUT;
        pr_err("profile loading timed out rc=%d\n", rc);
        goto no_profile;
    }

    /* Check whether the charger is ready */
    if (!is_charger_available(chip))
        goto reschedule;

    /* Disable charging for a FG cycle before calculating vbat_in_range */
    if (!chip->charging_disabled) {
        rc = set_prop_enable_charging(chip, false);
        if (rc)
            pr_err("Failed to disable charging, rc=%d\n", rc);

        /*
         * Force a SRAM data refresh, then retry this whole function
         * after BATTERY_PSY_WAIT_MS (falls through to "reschedule").
         */
        goto update;
    }

    batt_node = of_find_node_by_name(node, "qcom,battery-data");
    if (!batt_node) {
        pr_warn("No available batterydata, using OTP defaults\n");
        rc = 0;
        goto no_profile;
    }

    /* Battery id is read in ohms; profiles are keyed by kohms. */
    batt_id = get_sram_prop_now(chip, FG_DATA_BATT_ID);
    batt_id /= 1000;
    if (fg_debug_mask & FG_STATUS)
        pr_info("battery id = %dKOhms\n", batt_id);

    profile_node = of_batterydata_get_best_profile(batt_node, batt_id,
                            fg_batt_type);
    if (IS_ERR_OR_NULL(profile_node)) {
        rc = PTR_ERR(profile_node);
        if (rc == -EPROBE_DEFER) {
            goto reschedule;
        } else {
            pr_err("couldn't find profile handle rc=%d\n", rc);
            goto no_profile;
        }
    }

    /* read rslow compensation values if they're available */
    rc = of_property_read_u32(profile_node, "qcom,chg-rs-to-rslow",
                    &chip->rslow_comp.chg_rs_to_rslow);
    if (rc) {
        /* -EINVAL marks the field "not provided" for later checks */
        chip->rslow_comp.chg_rs_to_rslow = -EINVAL;
        if (rc != -EINVAL)
            pr_err("Could not read rs to rslow: %d\n", rc);
    }
    rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-c1",
                    &chip->rslow_comp.chg_rslow_comp_c1);
    if (rc) {
        chip->rslow_comp.chg_rslow_comp_c1 = -EINVAL;
        if (rc != -EINVAL)
            pr_err("Could not read rslow comp c1: %d\n", rc);
    }
    rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-c2",
                    &chip->rslow_comp.chg_rslow_comp_c2);
    if (rc) {
        chip->rslow_comp.chg_rslow_comp_c2 = -EINVAL;
        if (rc != -EINVAL)
            pr_err("Could not read rslow comp c2: %d\n", rc);
    }
    rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-thr",
                    &chip->rslow_comp.chg_rslow_comp_thr);
    if (rc) {
        chip->rslow_comp.chg_rslow_comp_thr = -EINVAL;
        if (rc != -EINVAL)
            pr_err("Could not read rslow comp thr: %d\n", rc);
    }

    rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
                    &chip->batt_max_voltage_uv);

    if (rc)
        pr_warn("couldn't find battery max voltage\n");

    /* NOTE(review): set unconditionally here — confirm this is intended */
    chip->dummy_battery_fake_temp = true;

    /*
     * Only configure from profile if fg-cc-cv-threshold-mv is not
     * defined in the charger device node.
     */
    if (!of_find_property(chip->spmi->dev.of_node,
                "qcom,fg-cc-cv-threshold-mv", NULL)) {
        of_property_read_u32(profile_node,
                "qcom,fg-cc-cv-threshold-mv",
                &chip->cc_cv_threshold_mv);
    }

    data = of_get_property(profile_node, "qcom,fg-profile-data", &len);
    if (!data) {
        pr_err("no battery profile loaded\n");
        rc = 0;
        goto no_profile;
    }

    if (len != FG_PROFILE_LEN) {
        pr_err("battery profile incorrect size: %d\n", len);
        rc = -EINVAL;
        goto no_profile;
    }

    rc = of_property_read_string(profile_node, "qcom,battery-type",
                    &batt_type_str);
    if (rc) {
        pr_err("Could not find battery data type: %d\n", rc);
        rc = 0;
        goto no_profile;
    }

    /* Buffer is devm-managed; allocated once and reused on re-entry. */
    if (!chip->batt_profile)
        chip->batt_profile = devm_kzalloc(chip->dev,
                sizeof(char) * len, GFP_KERNEL);

    if (!chip->batt_profile) {
        pr_err("out of memory\n");
        rc = -ENOMEM;
        goto no_profile;
    }

    rc = fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 1);
    if (rc) {
        pr_err("failed to read profile integrity rc=%d\n", rc);
        goto no_profile;
    }

    /* Read the profile currently resident in FG SRAM for comparison. */
    rc = fg_mem_read(chip, chip->batt_profile, BATT_PROFILE_OFFSET,
            len, 0, 1);
    if (rc) {
        pr_err("failed to read profile rc=%d\n", rc);
        goto no_profile;
    }

    esr_in_range = ((fg_data[FG_DATA_BATT_ESR].value < ESR_MAX) &&
            (fg_data[FG_DATA_BATT_ESR].value > ESR_MIN));

    vbat_in_range = get_vbat_est_diff(chip)
            < settings[FG_MEM_VBAT_EST_DIFF].value * 1000;
    /* Only the first PROFILE_COMPARE_LEN bytes are compared. */
    profiles_same = memcmp(chip->batt_profile, data,
                    PROFILE_COMPARE_LEN) == 0;
    if (reg & PROFILE_INTEGRITY_BIT) {
        fg_cap_learning_load_data(chip);
        if (vbat_in_range && !fg_is_batt_empty(chip) && profiles_same &&
            esr_in_range){
            if (fg_debug_mask & FG_STATUS)
                pr_info("Battery profiles same, using default\n");
            if (fg_est_dump)
                schedule_work(&chip->dump_sram);
            /*
             * Copy the profile read from device tree for
             * getting profile parameters later.
             */
            memcpy(chip->batt_profile, data, len);
            chip->batt_profile_len = len;
            goto done;
        }
    } else {
        /*
         * Integrity bit clear: SRAM profile is not trustworthy, so
         * cycle-count and learned-capacity history are discarded.
         */
        pr_info("Battery profile not same, clearing data\n");
        clear_cycle_counter(chip);
        chip->learning_data.learned_cc_uah = 0;
    }

    /* NOTE(review): direct call here vs schedule_work() above — confirm */
    if (fg_est_dump)
        dump_sram(&chip->dump_sram);

    if ((fg_debug_mask & FG_STATUS) && !esr_in_range)
        pr_info("ESR out of range: ESR %d mohm\n",
            fg_data[FG_DATA_BATT_ESR].value);
    if ((fg_debug_mask & FG_STATUS) && !vbat_in_range)
        pr_info("Vbat out of range: v_current_pred: %d, v: %d, thres: %d\n",
                fg_data[FG_DATA_CPRED_VOLTAGE].value,
                fg_data[FG_DATA_VOLTAGE].value,
                settings[FG_MEM_VBAT_EST_DIFF].value * 1000);
    if ((fg_debug_mask & FG_STATUS) && fg_is_batt_empty(chip))
        pr_info("battery empty\n");

    if ((fg_debug_mask & FG_STATUS) && !profiles_same)
        pr_info("profiles differ\n");

    if (fg_debug_mask & FG_STATUS) {
        pr_info("Using new profile\n");
        print_hex_dump(KERN_INFO, "FG: loaded profile: ",
                DUMP_PREFIX_NONE, 16, 1,
                chip->batt_profile, len, false);
    }

    /* Adopt the device-tree profile and restart the fuel gauge with it. */
    memcpy(chip->batt_profile, data, len);
    chip->batt_profile_len = len;

    if (fg_debug_mask & FG_STATUS)
        print_hex_dump(KERN_INFO, "FG: new profile: ",
                DUMP_PREFIX_NONE, 16, 1, chip->batt_profile,
                chip->batt_profile_len, false);

    rc = fg_do_restart(chip, true);
    if (rc) {
        pr_err("restart failed: %d\n", rc);
        goto no_profile;
    }

    /*
     * Only configure from profile if thermal-coefficients is not
     * defined in the FG device node.
     */
    if (!of_find_property(chip->spmi->dev.of_node,
                "qcom,thermal-coefficients", NULL)) {
        data = of_get_property(profile_node,
                "qcom,thermal-coefficients", &len);
        if (data && len == THERMAL_COEFF_N_BYTES) {
            memcpy(chip->thermal_coefficients, data, len);
            rc = fg_mem_write(chip, chip->thermal_coefficients,
                THERMAL_COEFF_ADDR, THERMAL_COEFF_N_BYTES,
                THERMAL_COEFF_OFFSET, 0);
            if (rc)
                pr_err("spmi write failed addr:%03x, ret:%d\n",
                        THERMAL_COEFF_ADDR, rc);
            else if (fg_debug_mask & FG_STATUS)
                pr_info("Battery thermal coefficients changed\n");
        }
    }

    if (chip->rconn_mohm > 0) {
        rc = fg_update_batt_rslow_settings(chip);
        if (rc)
            pr_err("Error in updating ESR, rc=%d\n", rc);
    }
done:
    /* Re-enable charging if we disabled it earlier in this function. */
    if (chip->charging_disabled) {
        rc = set_prop_enable_charging(chip, true);
        if (rc)
            pr_err("Failed to enable charging, rc=%d\n", rc);
        else
            chip->charging_disabled = false;
    }

    /* Module-parameter override takes precedence over profile type. */
    if (fg_batt_type)
        chip->batt_type = fg_batt_type;
    else
        chip->batt_type = batt_type_str;

    if (chip->first_profile_loaded && fg_reset_on_lockup) {
        if (fg_debug_mask & FG_STATUS)
            pr_info("restoring SRAM registers\n");
        rc = fg_backup_sram_registers(chip, false);
        if (rc)
            pr_err("Couldn't restore sram registers\n");

        /* Read the cycle counter back from FG SRAM */
        if (chip->cyc_ctr.en)
            restore_cycle_counter(chip);
    }

    chip->soc_empty = false;
    chip->first_profile_loaded = true;
    chip->profile_loaded = true;
    chip->soc_reporting_ready = true;
    chip->battery_missing = is_battery_missing(chip);
    update_chg_iterm(chip);
    update_cc_cv_setpoint(chip);
    rc = populate_system_data(chip);
    if (rc) {
        /*
         * NOTE(review): this return path skips fg_relax() on
         * profile_wakeup_source — possible wakeup-source leak; confirm.
         */
        pr_err("failed to read ocv properties=%d\n", rc);
        return rc;
    }
    estimate_battery_age(chip, &chip->actual_cap_uah);
    schedule_work(&chip->status_change_work);
    if (chip->power_supply_registered)
        power_supply_changed(&chip->bms_psy);
    fg_relax(&chip->profile_wakeup_source);
    pr_info("Battery SOC: %d, V: %duV\n", get_prop_capacity(chip),
        fg_data[FG_DATA_VOLTAGE].value);
    complete_all(&chip->fg_reset_done);
    return rc;
no_profile:
    /* SOC reporting is still enabled so userspace gets some estimate. */
    chip->soc_reporting_ready = true;
    if (chip->charging_disabled) {
        rc = set_prop_enable_charging(chip, true);
        if (rc)
            pr_err("Failed to enable charging, rc=%d\n", rc);
        else
            chip->charging_disabled = false;
    }

    if (chip->power_supply_registered)
        power_supply_changed(&chip->bms_psy);
    fg_relax(&chip->profile_wakeup_source);
    return rc;
update:
    /* Refresh SRAM data immediately, then fall through to reschedule. */
    cancel_delayed_work(&chip->update_sram_data);
    schedule_delayed_work(
        &chip->update_sram_data,
        msecs_to_jiffies(0));
reschedule:
    schedule_delayed_work(
        &chip->batt_profile_init,
        msecs_to_jiffies(BATTERY_PSY_WAIT_MS));
    fg_relax(&chip->profile_wakeup_source);
    return 0;
}
  6940.  
/*
 * check_empty_work() - debounced empty-battery detector.
 *
 * Two detection strategies, selected by chip->use_vbat_low_empty_soc:
 *  - vbatt-low interrupt path: battery is empty when the VBATT_LOW status
 *    bit is set or monotonic SOC has reached 0; re-arms the VBATT_LOW irq.
 *  - SOC-empty path: relies on fg_is_batt_empty() alone.
 *
 * On ZL1 hardware the empty condition is additionally re-polled every
 * FG_EMPTY_DEBOUNCE_MS; after more than 40 consecutive empty samples the
 * device is shut down via orderly_poweroff() to protect the battery.
 *
 * Releases empty_check_wakeup_source on exit (caller took it).
 */
static void check_empty_work(struct work_struct *work)
{
    struct fg_chip *chip = container_of(work,
                struct fg_chip,
                check_empty_work.work);
    bool vbatt_low_sts;
    int msoc;

    /* handle empty soc based on vbatt-low interrupt */
    if (chip->use_vbat_low_empty_soc) {
        if (fg_get_vbatt_status(chip, &vbatt_low_sts))
            goto out;

        msoc = get_monotonic_soc_raw(chip);

        if (fg_debug_mask & FG_STATUS)
            pr_info("Vbatt_low: %d, msoc: %d\n", vbatt_low_sts,
                msoc);
        if (vbatt_low_sts || (msoc == 0))
            chip->soc_empty = true;
        else
            chip->soc_empty = false;

        /* Notify userspace of the (possibly changed) empty state. */
        if (chip->power_supply_registered)
            power_supply_changed(&chip->bms_psy);

        /* Re-arm the VBATT_LOW interrupt now that it was handled. */
        if (!chip->vbat_low_irq_enabled) {
            enable_irq(chip->batt_irq[VBATT_LOW].irq);
            enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
            chip->vbat_low_irq_enabled = true;
        }
    } else if (fg_is_batt_empty(chip)) {
        if (fg_debug_mask & FG_STATUS)
            pr_info("EMPTY SOC high\n");
        chip->soc_empty = true;
        if (chip->power_supply_registered)
            power_supply_changed(&chip->bms_psy);
    }

#ifdef CONFIG_PRODUCT_LE_ZL1
    /* empty_cn counts consecutive empty samples (file-scope, ZL1 only). */
    if (empty_cn > 40) {
        pr_err("low voltage, forcing shutdown immediately\n");
        orderly_poweroff(true);
    }

    if (chip->soc_empty) {
        empty_cn ++;
        /* Keep polling while empty so the shutdown counter can advance. */
        queue_delayed_work(system_power_efficient_wq,
            &chip->check_empty_work, msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
    } else {
        empty_cn = 0;
    }
#endif

out:
    fg_relax(&chip->empty_check_wakeup_source);
}
  6998.  
  6999. static void batt_profile_init(struct work_struct *work)
  7000. {
  7001.     struct fg_chip *chip = container_of(work,
  7002.                 struct fg_chip,
  7003.                 batt_profile_init.work);
  7004.  
  7005.     if (fg_batt_profile_init(chip))
  7006.         pr_err("failed to initialize profile\n");
  7007. }
  7008.  
  7009. static void sysfs_restart_work(struct work_struct *work)
  7010. {
  7011.     struct fg_chip *chip = container_of(work,
  7012.                 struct fg_chip,
  7013.                 sysfs_restart_work);
  7014.     int rc;
  7015.  
  7016.     rc = fg_do_restart(chip, false);
  7017.     if (rc)
  7018.         pr_err("fg restart failed: %d\n", rc);
  7019.     mutex_lock(&chip->sysfs_restart_lock);
  7020.     fg_restart = 0;
  7021.     mutex_unlock(&chip->sysfs_restart_lock);
  7022. }
  7023.  
  7024. #define SRAM_RELEASE_TIMEOUT_MS     500
  7025. static void charge_full_work(struct work_struct *work)
  7026. {
  7027.     struct fg_chip *chip = container_of(work,
  7028.                 struct fg_chip,
  7029.                 charge_full_work);
  7030.     int rc;
  7031.     u8 buffer[3];
  7032.     int bsoc;
  7033.     int resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
  7034.     bool disable = false;
  7035.     u8 reg;
  7036.  
  7037.     if (chip->status != POWER_SUPPLY_STATUS_FULL) {
  7038.         if (fg_debug_mask & FG_STATUS)
  7039.             pr_info("battery not full: %d\n", chip->status);
  7040.         disable = true;
  7041.     }
  7042.  
  7043.     fg_mem_lock(chip);
  7044.     rc = fg_mem_read(chip, buffer, BATTERY_SOC_REG, 3, 1, 0);
  7045.     if (rc) {
  7046.         pr_err("Unable to read battery soc: %d\n", rc);
  7047.         goto out;
  7048.     }
  7049.     if (buffer[2] <= resume_soc_raw) {
  7050.         if (fg_debug_mask & FG_STATUS)
  7051.             pr_info("bsoc = 0x%02x <= resume = 0x%02x\n",
  7052.                     buffer[2], resume_soc_raw);
  7053.         disable = true;
  7054.     }
  7055.     if (!disable)
  7056.         goto out;
  7057.  
  7058.     rc = fg_mem_write(chip, buffer, SOC_FULL_REG, 3,
  7059.             SOC_FULL_OFFSET, 0);
  7060.     if (rc) {
  7061.         pr_err("failed to write SOC_FULL rc=%d\n", rc);
  7062.         goto out;
  7063.     }
  7064.     /* force a full soc value into the monotonic in order to display 100 */
  7065.     buffer[0] = 0xFF;
  7066.     buffer[1] = 0xFF;
  7067.     rc = fg_mem_write(chip, buffer, SRAM_MONOTONIC_SOC_REG, 2,
  7068.             SRAM_MONOTONIC_SOC_OFFSET, 0);
  7069.     if (rc) {
  7070.         pr_err("failed to write SOC_FULL rc=%d\n", rc);
  7071.         goto out;
  7072.     }
  7073.     if (fg_debug_mask & FG_STATUS) {
  7074.         bsoc = buffer[0] | buffer[1] << 8 | buffer[2] << 16;
  7075.         pr_info("wrote %06x into soc full\n", bsoc);
  7076.     }
  7077.     fg_mem_release(chip);
  7078.     /*
  7079.      * wait one cycle to make sure the soc is updated before clearing
  7080.      * the soc mask bit
  7081.      */
  7082.     fg_mem_lock(chip);
  7083.     fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 0);
  7084. out:
  7085.     fg_mem_release(chip);
  7086.     if (disable)
  7087.         chip->charge_full = false;
  7088. }
  7089.  
  7090. static void update_bcl_thresholds(struct fg_chip *chip)
  7091. {
  7092.     u8 data[4];
  7093.     u8 mh_offset = 0, lm_offset = 0;
  7094.     u16 address = 0;
  7095.     int ret = 0;
  7096.  
  7097.     address = settings[FG_MEM_BCL_MH_THRESHOLD].address;
  7098.     mh_offset = settings[FG_MEM_BCL_MH_THRESHOLD].offset;
  7099.     lm_offset = settings[FG_MEM_BCL_LM_THRESHOLD].offset;
  7100.     ret = fg_mem_read(chip, data, address, 4, 0, 1);
  7101.     if (ret)
  7102.         pr_err("Error reading BCL LM & MH threshold rc:%d\n", ret);
  7103.     else
  7104.         pr_debug("Old BCL LM threshold:%x MH threshold:%x\n",
  7105.             data[lm_offset], data[mh_offset]);
  7106.     BCL_MA_TO_ADC(settings[FG_MEM_BCL_MH_THRESHOLD].value, data[mh_offset]);
  7107.     BCL_MA_TO_ADC(settings[FG_MEM_BCL_LM_THRESHOLD].value, data[lm_offset]);
  7108.  
  7109.     ret = fg_mem_write(chip, data, address, 4, 0, 0);
  7110.     if (ret)
  7111.         pr_err("spmi write failed. addr:%03x, ret:%d\n",
  7112.             address, ret);
  7113.     else
  7114.         pr_debug("New BCL LM threshold:%x MH threshold:%x\n",
  7115.             data[lm_offset], data[mh_offset]);
  7116. }
  7117.  
  7118. static int disable_bcl_lpm(struct fg_chip *chip)
  7119. {
  7120.     u8 data[4];
  7121.     u8 lm_offset = 0;
  7122.     u16 address = 0;
  7123.     int rc = 0;
  7124.  
  7125.     address = settings[FG_MEM_BCL_LM_THRESHOLD].address;
  7126.     lm_offset = settings[FG_MEM_BCL_LM_THRESHOLD].offset;
  7127.     rc = fg_mem_read(chip, data, address, 4, 0, 1);
  7128.     if (rc) {
  7129.         pr_err("Error reading BCL LM & MH threshold rc:%d\n", rc);
  7130.         return rc;
  7131.     }
  7132.     pr_debug("Old BCL LM threshold:%x\n", data[lm_offset]);
  7133.  
  7134.     /* Put BCL always above LPM */
  7135.     BCL_MA_TO_ADC(0, data[lm_offset]);
  7136.  
  7137.     rc = fg_mem_write(chip, data, address, 4, 0, 0);
  7138.     if (rc)
  7139.         pr_err("spmi write failed. addr:%03x, rc:%d\n",
  7140.             address, rc);
  7141.     else
  7142.         pr_debug("New BCL LM threshold:%x\n", data[lm_offset]);
  7143.  
  7144.     return rc;
  7145. }
  7146.  
  7147. static void bcl_hi_power_work(struct work_struct *work)
  7148. {
  7149.     struct fg_chip *chip = container_of(work,
  7150.             struct fg_chip,
  7151.             bcl_hi_power_work);
  7152.     int rc;
  7153.  
  7154.     if (chip->bcl_lpm_disabled) {
  7155.         rc = disable_bcl_lpm(chip);
  7156.         if (rc)
  7157.             pr_err("failed to disable bcl low mode %d\n",
  7158.                     rc);
  7159.     } else {
  7160.         update_bcl_thresholds(chip);
  7161.     }
  7162. }
  7163.  
  7164. #define VOLT_UV_TO_VOLTCMP8(volt_uv)    \
  7165.             ((volt_uv - 2500000) / 9766)
  7166. static int update_irq_volt_empty(struct fg_chip *chip)
  7167. {
  7168.     u8 data;
  7169.     int volt_mv = settings[FG_MEM_IRQ_VOLT_EMPTY].value;
  7170.  
  7171.     data = (u8)VOLT_UV_TO_VOLTCMP8(volt_mv * 1000);
  7172.  
  7173.     if (fg_debug_mask & FG_STATUS)
  7174.         pr_info("voltage = %d, converted_raw = %04x\n", volt_mv, data);
  7175.     return fg_mem_write(chip, &data,
  7176.             settings[FG_MEM_IRQ_VOLT_EMPTY].address, 1,
  7177.             settings[FG_MEM_IRQ_VOLT_EMPTY].offset, 0);
  7178. }
  7179.  
  7180. static int update_cutoff_voltage(struct fg_chip *chip)
  7181. {
  7182.     u8 data[2];
  7183.     u16 converted_voltage_raw;
  7184.     s64 voltage_mv = settings[FG_MEM_CUTOFF_VOLTAGE].value;
  7185.  
  7186.     converted_voltage_raw = (s16)MICROUNITS_TO_ADC_RAW(voltage_mv * 1000);
  7187.     data[0] = cpu_to_le16(converted_voltage_raw) & 0xFF;
  7188.     data[1] = cpu_to_le16(converted_voltage_raw) >> 8;
  7189.  
  7190.     if (fg_debug_mask & FG_STATUS)
  7191.         pr_info("voltage = %lld, converted_raw = %04x, data = %02x %02x\n",
  7192.             voltage_mv, converted_voltage_raw, data[0], data[1]);
  7193.     return fg_mem_write(chip, data, settings[FG_MEM_CUTOFF_VOLTAGE].address,
  7194.                 2, settings[FG_MEM_CUTOFF_VOLTAGE].offset, 0);
  7195. }
  7196.  
  7197. static int update_iterm(struct fg_chip *chip)
  7198. {
  7199.     u8 data[2];
  7200.     u16 converted_current_raw;
  7201.     s64 current_ma = -settings[FG_MEM_TERM_CURRENT].value;
  7202.  
  7203.     converted_current_raw = (s16)MICROUNITS_TO_ADC_RAW(current_ma * 1000);
  7204.     data[0] = cpu_to_le16(converted_current_raw) & 0xFF;
  7205.     data[1] = cpu_to_le16(converted_current_raw) >> 8;
  7206.  
  7207.     if (fg_debug_mask & FG_STATUS)
  7208.         pr_info("current = %lld, converted_raw = %04x, data = %02x %02x\n",
  7209.             current_ma, converted_current_raw, data[0], data[1]);
  7210.     return fg_mem_write(chip, data, settings[FG_MEM_TERM_CURRENT].address,
  7211.                 2, settings[FG_MEM_TERM_CURRENT].offset, 0);
  7212. }
  7213.  
  7214. #define OF_READ_SETTING(type, qpnp_dt_property, retval, optional)   \
  7215. do {                                    \
  7216.     if (retval)                         \
  7217.         break;                          \
  7218.                                     \
  7219.     retval = of_property_read_u32(chip->spmi->dev.of_node,      \
  7220.                     "qcom," qpnp_dt_property,   \
  7221.                     &settings[type].value);     \
  7222.                                     \
  7223.     if ((retval == -EINVAL) && optional)                \
  7224.         retval = 0;                     \
  7225.     else if (retval)                        \
  7226.         pr_err("Error reading " #qpnp_dt_property       \
  7227.                 " property rc = %d\n", rc);     \
  7228. } while (0)
  7229.  
  7230. #define OF_READ_PROPERTY(store, qpnp_dt_property, retval, default_val)  \
  7231. do {                                    \
  7232.     if (retval)                         \
  7233.         break;                          \
  7234.                                     \
  7235.     retval = of_property_read_u32(chip->spmi->dev.of_node,      \
  7236.                     "qcom," qpnp_dt_property,   \
  7237.                     &store);            \
  7238.                                     \
  7239.     if (retval == -EINVAL) {                    \
  7240.         retval = 0;                     \
  7241.         store = default_val;                    \
  7242.     } else if (retval) {                        \
  7243.         pr_err("Error reading " #qpnp_dt_property       \
  7244.                 " property rc = %d\n", rc);     \
  7245.     }                               \
  7246. } while (0)
  7247.  
/*
 * fg_dischg_gain_dt_init() - parse the discharge voltage-gain tables.
 *
 * Reads three VOLT_GAIN_MAX-sized u32 arrays from the FG device node:
 * the SOC breakpoints, the medium-current gains and the high-current
 * gains.  On any missing/malformed property the feature is disabled via
 * chip->dischg_gain.enable = false.
 *
 * NOTE(review): several "goto out" paths (missing property, wrong array
 * size, out-of-range SOC) leave rc at 0, so the function reports success
 * while disabling the feature — confirm callers rely on this.
 */
static int fg_dischg_gain_dt_init(struct fg_chip *chip)
{
    struct device_node *node = chip->spmi->dev.of_node;
    struct property *prop;
    int i, rc = 0;
    size_t size;

    prop = of_find_property(node, "qcom,fg-dischg-voltage-gain-soc",
            NULL);
    if (!prop) {
        pr_err("qcom-fg-dischg-voltage-gain-soc not specified\n");
        goto out;
    }

    /* prop->length is in bytes; array must have exactly VOLT_GAIN_MAX u32s */
    size = prop->length / sizeof(u32);
    if (size != VOLT_GAIN_MAX) {
        pr_err("Voltage gain SOC specified is of incorrect size\n");
        goto out;
    }

    rc = of_property_read_u32_array(node,
        "qcom,fg-dischg-voltage-gain-soc", chip->dischg_gain.soc, size);
    if (rc < 0) {
        pr_err("Reading qcom-fg-dischg-voltage-gain-soc failed, rc=%d\n",
            rc);
        goto out;
    }

    /*
     * SOC breakpoints must be percentages in [0, 100].
     * NOTE(review): if dischg_gain.soc[] is unsigned, the "< 0" half of
     * this check is dead code — confirm the field's type.
     */
    for (i = 0; i < VOLT_GAIN_MAX; i++) {
        if (chip->dischg_gain.soc[i] < 0 ||
                chip->dischg_gain.soc[i] > 100) {
            pr_err("Incorrect dischg-voltage-gain-soc\n");
            goto out;
        }
    }

    prop = of_find_property(node, "qcom,fg-dischg-med-voltage-gain",
            NULL);
    if (!prop) {
        pr_err("qcom-fg-dischg-med-voltage-gain not specified\n");
        goto out;
    }

    size = prop->length / sizeof(u32);
    if (size != VOLT_GAIN_MAX) {
        pr_err("med-voltage-gain specified is of incorrect size\n");
        goto out;
    }

    rc = of_property_read_u32_array(node,
        "qcom,fg-dischg-med-voltage-gain", chip->dischg_gain.medc_gain,
        size);
    if (rc < 0) {
        pr_err("Reading qcom-fg-dischg-med-voltage-gain failed, rc=%d\n",
            rc);
        goto out;
    }

    prop = of_find_property(node, "qcom,fg-dischg-high-voltage-gain",
            NULL);
    if (!prop) {
        pr_err("qcom-fg-dischg-high-voltage-gain not specified\n");
        goto out;
    }

    size = prop->length / sizeof(u32);
    if (size != VOLT_GAIN_MAX) {
        pr_err("high-voltage-gain specified is of incorrect size\n");
        goto out;
    }

    rc = of_property_read_u32_array(node,
        "qcom,fg-dischg-high-voltage-gain",
        chip->dischg_gain.highc_gain, size);
    if (rc < 0) {
        pr_err("Reading qcom-fg-dischg-high-voltage-gain failed, rc=%d\n",
            rc);
        goto out;
    }

    if (fg_debug_mask & FG_STATUS) {
        for (i = 0; i < VOLT_GAIN_MAX; i++)
            pr_info("SOC:%d MedC_Gain:%d HighC_Gain: %d\n",
                chip->dischg_gain.soc[i],
                chip->dischg_gain.medc_gain[i],
                chip->dischg_gain.highc_gain[i]);
    }
    return 0;
out:
    /* Any parse failure disables the discharge-gain feature entirely. */
    chip->dischg_gain.enable = false;
    return rc;
}
  7340.  
  7341. #define DEFAULT_EVALUATION_CURRENT_MA   1000
  7342. static int fg_of_init(struct fg_chip *chip)
  7343. {
  7344.     int rc = 0, sense_type, len = 0;
  7345.     const char *data;
  7346.     struct device_node *node = chip->spmi->dev.of_node;
  7347.     u32 temp[2] = {0};
  7348.  
  7349.     OF_READ_SETTING(FG_MEM_SOFT_HOT, "warm-bat-decidegc", rc, 1);
  7350.     OF_READ_SETTING(FG_MEM_SOFT_COLD, "cool-bat-decidegc", rc, 1);
  7351.     OF_READ_SETTING(FG_MEM_HARD_HOT, "hot-bat-decidegc", rc, 1);
  7352.     OF_READ_SETTING(FG_MEM_HARD_COLD, "cold-bat-decidegc", rc, 1);
  7353.  
  7354.     if (of_find_property(node, "qcom,cold-hot-jeita-hysteresis", NULL)) {
  7355.         int hard_hot = 0, soft_hot = 0, hard_cold = 0, soft_cold = 0;
  7356.  
  7357.         rc = of_property_read_u32_array(node,
  7358.             "qcom,cold-hot-jeita-hysteresis", temp, 2);
  7359.         if (rc) {
  7360.             pr_err("Error reading cold-hot-jeita-hysteresis rc=%d\n",
  7361.                 rc);
  7362.             return rc;
  7363.         }
  7364.  
  7365.         chip->jeita_hysteresis_support = true;
  7366.         chip->cold_hysteresis = temp[0];
  7367.         chip->hot_hysteresis = temp[1];
  7368.         hard_hot = settings[FG_MEM_HARD_HOT].value;
  7369.         soft_hot = settings[FG_MEM_SOFT_HOT].value;
  7370.         hard_cold = settings[FG_MEM_HARD_COLD].value;
  7371.         soft_cold = settings[FG_MEM_SOFT_COLD].value;
  7372.         if (((hard_hot - chip->hot_hysteresis) < soft_hot) ||
  7373.             ((hard_cold + chip->cold_hysteresis) > soft_cold)) {
  7374.             chip->jeita_hysteresis_support = false;
  7375.             pr_err("invalid hysteresis: hot_hysterresis = %d cold_hysteresis = %d\n",
  7376.                 chip->hot_hysteresis, chip->cold_hysteresis);
  7377.         } else {
  7378.             pr_debug("cold_hysteresis = %d, hot_hysteresis = %d\n",
  7379.                 chip->cold_hysteresis, chip->hot_hysteresis);
  7380.         }
  7381.     }
  7382.  
  7383.     OF_READ_SETTING(FG_MEM_BCL_LM_THRESHOLD, "bcl-lm-threshold-ma",
  7384.         rc, 1);
  7385.     OF_READ_SETTING(FG_MEM_BCL_MH_THRESHOLD, "bcl-mh-threshold-ma",
  7386.         rc, 1);
  7387.     OF_READ_SETTING(FG_MEM_TERM_CURRENT, "fg-iterm-ma", rc, 1);
  7388.     OF_READ_SETTING(FG_MEM_CHG_TERM_CURRENT, "fg-chg-iterm-ma", rc, 1);
  7389.     OF_READ_SETTING(FG_MEM_CUTOFF_VOLTAGE, "fg-cutoff-voltage-mv", rc, 1);
  7390.     data = of_get_property(chip->spmi->dev.of_node,
  7391.             "qcom,thermal-coefficients", &len);
  7392.     if (data && len == THERMAL_COEFF_N_BYTES) {
  7393.         memcpy(chip->thermal_coefficients, data, len);
  7394.         chip->use_thermal_coefficients = true;
  7395.     }
  7396.     OF_READ_SETTING(FG_MEM_RESUME_SOC, "resume-soc", rc, 1);
  7397.     settings[FG_MEM_RESUME_SOC].value =
  7398.         DIV_ROUND_CLOSEST(settings[FG_MEM_RESUME_SOC].value
  7399.                 * FULL_SOC_RAW, FULL_CAPACITY);
  7400.     OF_READ_SETTING(FG_MEM_RESUME_SOC, "resume-soc-raw", rc, 1);
  7401.     OF_READ_SETTING(FG_MEM_IRQ_VOLT_EMPTY, "irq-volt-empty-mv", rc, 1);
  7402.     OF_READ_SETTING(FG_MEM_VBAT_EST_DIFF, "fg-vbat-estimate-diff-mv", rc, 1);
  7403.     OF_READ_SETTING(FG_MEM_DELTA_SOC, "fg-delta-soc", rc, 1);
  7404.     OF_READ_SETTING(FG_MEM_BATT_LOW, "fg-vbatt-low-threshold", rc, 1);
  7405.     OF_READ_SETTING(FG_MEM_THERM_DELAY, "fg-therm-delay-us", rc, 1);
  7406.     OF_READ_PROPERTY(chip->learning_data.max_increment,
  7407.             "cl-max-increment-deciperc", rc, 5);
  7408.     OF_READ_PROPERTY(chip->learning_data.max_decrement,
  7409.             "cl-max-decrement-deciperc", rc, 100);
  7410.     OF_READ_PROPERTY(chip->learning_data.max_temp,
  7411.             "cl-max-temp-decidegc", rc, 450);
  7412.     OF_READ_PROPERTY(chip->learning_data.min_temp,
  7413.             "cl-min-temp-decidegc", rc, 150);
  7414.     OF_READ_PROPERTY(chip->learning_data.max_start_soc,
  7415.             "cl-max-start-capacity", rc, 15);
  7416.     OF_READ_PROPERTY(chip->learning_data.vbat_est_thr_uv,
  7417.             "cl-vbat-est-thr-uv", rc, 40000);
  7418.     OF_READ_PROPERTY(chip->learning_data.max_cap_limit,
  7419.             "cl-max-limit-deciperc", rc, 0);
  7420.     OF_READ_PROPERTY(chip->learning_data.min_cap_limit,
  7421.             "cl-min-limit-deciperc", rc, 0);
  7422.     OF_READ_PROPERTY(chip->evaluation_current,
  7423.             "aging-eval-current-ma", rc,
  7424.             DEFAULT_EVALUATION_CURRENT_MA);
  7425.     OF_READ_PROPERTY(chip->cc_cv_threshold_mv,
  7426.             "fg-cc-cv-threshold-mv", rc, 0);
  7427.     if (of_property_read_bool(chip->spmi->dev.of_node,
  7428.                 "qcom,capacity-learning-on"))
  7429.         chip->batt_aging_mode = FG_AGING_CC;
  7430.     else if (of_property_read_bool(chip->spmi->dev.of_node,
  7431.                 "qcom,capacity-estimation-on"))
  7432.         chip->batt_aging_mode = FG_AGING_ESR;
  7433.     else
  7434.         chip->batt_aging_mode = FG_AGING_NONE;
  7435.     if (chip->batt_aging_mode == FG_AGING_CC) {
  7436.         chip->learning_data.feedback_on = of_property_read_bool(
  7437.                     chip->spmi->dev.of_node,
  7438.                     "qcom,capacity-learning-feedback");
  7439.     }
  7440.     if (fg_debug_mask & FG_AGING)
  7441.         pr_info("battery aging mode: %d\n", chip->batt_aging_mode);
  7442.  
  7443.     /* Get the use-otp-profile property */
  7444.     chip->use_otp_profile = of_property_read_bool(
  7445.             chip->spmi->dev.of_node,
  7446.             "qcom,use-otp-profile");
  7447.     chip->hold_soc_while_full = of_property_read_bool(
  7448.             chip->spmi->dev.of_node,
  7449.             "qcom,hold-soc-while-full");
  7450.  
  7451.     sense_type = of_property_read_bool(chip->spmi->dev.of_node,
  7452.                     "qcom,ext-sense-type");
  7453.     chip->fg_force_restart_enable =
  7454.             of_property_read_bool(chip->spmi->dev.of_node,
  7455.             "qcom,fg-force-restart-enable");
  7456.  
  7457.     if (rc == 0) {
  7458.         if (fg_sense_type < 0)
  7459.             fg_sense_type = sense_type;
  7460.  
  7461.         if (fg_debug_mask & FG_STATUS) {
  7462.             if (fg_sense_type == INTERNAL_CURRENT_SENSE)
  7463.                 pr_info("Using internal sense\n");
  7464.             else if (fg_sense_type == EXTERNAL_CURRENT_SENSE)
  7465.                 pr_info("Using external sense\n");
  7466.             else
  7467.                 pr_info("Using default sense\n");
  7468.         }
  7469.     } else {
  7470.         rc = 0;
  7471.     }
  7472.  
  7473.     chip->bad_batt_detection_en = of_property_read_bool(node,
  7474.                 "qcom,bad-battery-detection-enable");
  7475.  
  7476.     chip->sw_rbias_ctrl = of_property_read_bool(node,
  7477.                 "qcom,sw-rbias-control");
  7478.  
  7479.     chip->cyc_ctr.en = of_property_read_bool(node,
  7480.                 "qcom,cycle-counter-en");
  7481.     if (chip->cyc_ctr.en)
  7482.         chip->cyc_ctr.id = 1;
  7483.  
  7484.     chip->esr_pulse_tune_en = of_property_read_bool(node,
  7485.                     "qcom,esr-pulse-tuning-en");
  7486.  
  7487.     chip->soc_slope_limiter_en = of_property_read_bool(node,
  7488.                     "qcom,fg-control-slope-limiter");
  7489.     if (chip->soc_slope_limiter_en) {
  7490.         OF_READ_PROPERTY(chip->slope_limit_temp,
  7491.             "fg-slope-limit-temp-threshold", rc,
  7492.             SLOPE_LIMIT_TEMP_THRESHOLD);
  7493.  
  7494.         OF_READ_PROPERTY(chip->slope_limit_coeffs[LOW_TEMP_CHARGE],
  7495.             "fg-slope-limit-low-temp-chg", rc,
  7496.             SLOPE_LIMIT_LOW_TEMP_CHG);
  7497.  
  7498.         OF_READ_PROPERTY(chip->slope_limit_coeffs[HIGH_TEMP_CHARGE],
  7499.             "fg-slope-limit-high-temp-chg", rc,
  7500.             SLOPE_LIMIT_HIGH_TEMP_CHG);
  7501.  
  7502.         OF_READ_PROPERTY(chip->slope_limit_coeffs[LOW_TEMP_DISCHARGE],
  7503.             "fg-slope-limit-low-temp-dischg", rc,
  7504.             SLOPE_LIMIT_LOW_TEMP_DISCHG);
  7505.  
  7506.         OF_READ_PROPERTY(chip->slope_limit_coeffs[HIGH_TEMP_DISCHARGE],
  7507.             "fg-slope-limit-high-temp-dischg", rc,
  7508.             SLOPE_LIMIT_HIGH_TEMP_DISCHG);
  7509.  
  7510.         if (fg_debug_mask & FG_STATUS)
  7511.             pr_info("slope-limiter, temp: %d coeffs: [%d %d %d %d]\n",
  7512.                 chip->slope_limit_temp,
  7513.                 chip->slope_limit_coeffs[LOW_TEMP_CHARGE],
  7514.                 chip->slope_limit_coeffs[HIGH_TEMP_CHARGE],
  7515.                 chip->slope_limit_coeffs[LOW_TEMP_DISCHARGE],
  7516.                 chip->slope_limit_coeffs[HIGH_TEMP_DISCHARGE]);
  7517.     }
  7518.  
  7519.     OF_READ_PROPERTY(chip->rconn_mohm, "fg-rconn-mohm", rc, 0);
  7520.  
  7521.     chip->dischg_gain.enable = of_property_read_bool(node,
  7522.                     "qcom,fg-dischg-voltage-gain-ctrl");
  7523.     if (chip->dischg_gain.enable) {
  7524.         rc = fg_dischg_gain_dt_init(chip);
  7525.         if (rc) {
  7526.             pr_err("Error in reading dischg_gain parameters, rc=%d\n",
  7527.                 rc);
  7528.             rc = 0;
  7529.         }
  7530.     }
  7531.  
  7532.     chip->use_vbat_low_empty_soc = of_property_read_bool(node,
  7533.                     "qcom,fg-use-vbat-low-empty-soc");
  7534.  
  7535.     OF_READ_PROPERTY(chip->batt_temp_low_limit,
  7536.             "fg-batt-temp-low-limit", rc, BATT_TEMP_LOW_LIMIT);
  7537.  
  7538.     OF_READ_PROPERTY(chip->batt_temp_high_limit,
  7539.             "fg-batt-temp-high-limit", rc, BATT_TEMP_HIGH_LIMIT);
  7540.  
  7541.     if (fg_debug_mask & FG_STATUS)
  7542.         pr_info("batt-temp-low_limit: %d batt-temp-high_limit: %d\n",
  7543.             chip->batt_temp_low_limit, chip->batt_temp_high_limit);
  7544.  
  7545.     OF_READ_PROPERTY(chip->cc_soc_limit_pct, "fg-cc-soc-limit-pct", rc, 0);
  7546.  
  7547.     if (fg_debug_mask & FG_STATUS)
  7548.         pr_info("cc-soc-limit-pct: %d\n", chip->cc_soc_limit_pct);
  7549.  
  7550.     chip->batt_info_restore = of_property_read_bool(node,
  7551.                     "qcom,fg-restore-batt-info");
  7552.  
  7553.     chip->low_batt_temp_comp = of_property_read_bool(node,
  7554.                     "qcom,low-batt-temp-comp");
  7555.  
  7556.     if (fg_debug_mask & FG_STATUS)
  7557.         pr_info("restore: %d validate_by_ocv: %d range_pct: %d\n",
  7558.             chip->batt_info_restore, fg_batt_valid_ocv,
  7559.             fg_batt_range_pct);
  7560.  
  7561.     return rc;
  7562. }
  7563.  
  7564. static int fg_init_irqs(struct fg_chip *chip)
  7565. {
  7566.     int rc = 0;
  7567.     struct resource *resource;
  7568.     struct spmi_resource *spmi_resource;
  7569.     u8 subtype;
  7570.     struct spmi_device *spmi = chip->spmi;
  7571.  
  7572.     spmi_for_each_container_dev(spmi_resource, spmi) {
  7573.         if (!spmi_resource) {
  7574.             pr_err("fg: spmi resource absent\n");
  7575.             return rc;
  7576.         }
  7577.  
  7578.         resource = spmi_get_resource(spmi, spmi_resource,
  7579.                         IORESOURCE_MEM, 0);
  7580.         if (!(resource && resource->start)) {
  7581.             pr_err("node %s IO resource absent!\n",
  7582.                 spmi->dev.of_node->full_name);
  7583.             return rc;
  7584.         }
  7585.  
  7586.         if ((resource->start == chip->vbat_adc_addr) ||
  7587.                 (resource->start == chip->ibat_adc_addr) ||
  7588.                 (resource->start == chip->tp_rev_addr))
  7589.             continue;
  7590.  
  7591.         rc = fg_read(chip, &subtype,
  7592.                 resource->start + REG_OFFSET_PERP_SUBTYPE, 1);
  7593.         if (rc) {
  7594.             pr_err("Peripheral subtype read failed rc=%d\n", rc);
  7595.             return rc;
  7596.         }
  7597.  
  7598.         switch (subtype) {
  7599.         case FG_SOC:
  7600.             chip->soc_irq[FULL_SOC].irq = spmi_get_irq_byname(
  7601.                     chip->spmi, spmi_resource, "full-soc");
  7602.             if (chip->soc_irq[FULL_SOC].irq < 0) {
  7603.                 pr_err("Unable to get full-soc irq\n");
  7604.                 return rc;
  7605.             }
  7606.             chip->soc_irq[EMPTY_SOC].irq = spmi_get_irq_byname(
  7607.                     chip->spmi, spmi_resource, "empty-soc");
  7608.             if (chip->soc_irq[EMPTY_SOC].irq < 0) {
  7609.                 pr_err("Unable to get empty-soc irq\n");
  7610.                 return rc;
  7611.             }
  7612.             chip->soc_irq[DELTA_SOC].irq = spmi_get_irq_byname(
  7613.                     chip->spmi, spmi_resource, "delta-soc");
  7614.             if (chip->soc_irq[DELTA_SOC].irq < 0) {
  7615.                 pr_err("Unable to get delta-soc irq\n");
  7616.                 return rc;
  7617.             }
  7618.             chip->soc_irq[FIRST_EST_DONE].irq = spmi_get_irq_byname(
  7619.                 chip->spmi, spmi_resource, "first-est-done");
  7620.             if (chip->soc_irq[FIRST_EST_DONE].irq < 0) {
  7621.                 pr_err("Unable to get first-est-done irq\n");
  7622.                 return rc;
  7623.             }
  7624.  
  7625.             rc = devm_request_threaded_irq(chip->dev,
  7626.                 chip->soc_irq[FULL_SOC].irq, NULL,
  7627.                 fg_soc_irq_handler,
  7628.                 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
  7629.                 "full-soc", chip);
  7630.             if (rc < 0) {
  7631.                 pr_err("Can't request %d full-soc: %d\n",
  7632.                     chip->soc_irq[FULL_SOC].irq, rc);
  7633.                 return rc;
  7634.             }
  7635.             enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
  7636.             chip->full_soc_irq_enabled = true;
  7637.  
  7638.             if (!chip->use_vbat_low_empty_soc) {
  7639.                 rc = devm_request_irq(chip->dev,
  7640.                     chip->soc_irq[EMPTY_SOC].irq,
  7641.                     fg_empty_soc_irq_handler,
  7642.                     IRQF_TRIGGER_RISING |
  7643.                     IRQF_TRIGGER_FALLING,
  7644.                     "empty-soc", chip);
  7645.                 if (rc < 0) {
  7646.                     pr_err("Can't request %d empty-soc: %d\n",
  7647.                         chip->soc_irq[EMPTY_SOC].irq,
  7648.                         rc);
  7649.                     return rc;
  7650.                 }
  7651.             }
  7652.  
  7653.             rc = devm_request_threaded_irq(chip->dev,
  7654.                 chip->soc_irq[DELTA_SOC].irq, NULL,
  7655.                 fg_soc_irq_handler,
  7656.                 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
  7657.                 "delta-soc", chip);
  7658.             if (rc < 0) {
  7659.                 pr_err("Can't request %d delta-soc: %d\n",
  7660.                     chip->soc_irq[DELTA_SOC].irq, rc);
  7661.                 return rc;
  7662.             }
  7663.             rc = devm_request_irq(chip->dev,
  7664.                 chip->soc_irq[FIRST_EST_DONE].irq,
  7665.                 fg_first_soc_irq_handler, IRQF_TRIGGER_RISING,
  7666.                 "first-est-done", chip);
  7667.             if (rc < 0) {
  7668.                 pr_err("Can't request %d delta-soc: %d\n",
  7669.                     chip->soc_irq[FIRST_EST_DONE].irq, rc);
  7670.                 return rc;
  7671.             }
  7672.  
  7673.             enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
  7674.             if (!chip->use_vbat_low_empty_soc)
  7675.                 enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
  7676.             break;
  7677.         case FG_MEMIF:
  7678.             chip->mem_irq[FG_MEM_AVAIL].irq = spmi_get_irq_byname(
  7679.                     chip->spmi, spmi_resource, "mem-avail");
  7680.             if (chip->mem_irq[FG_MEM_AVAIL].irq < 0) {
  7681.                 pr_err("Unable to get mem-avail irq\n");
  7682.                 return rc;
  7683.             }
  7684.             rc = devm_request_irq(chip->dev,
  7685.                     chip->mem_irq[FG_MEM_AVAIL].irq,
  7686.                     fg_mem_avail_irq_handler,
  7687.                     IRQF_TRIGGER_RISING |
  7688.                     IRQF_TRIGGER_FALLING,
  7689.                     "mem-avail", chip);
  7690.             if (rc < 0) {
  7691.                 pr_err("Can't request %d mem-avail: %d\n",
  7692.                     chip->mem_irq[FG_MEM_AVAIL].irq, rc);
  7693.                 return rc;
  7694.             }
  7695.             break;
  7696.         case FG_BATT:
  7697.             chip->batt_irq[JEITA_SOFT_COLD].irq =
  7698.                 spmi_get_irq_byname(chip->spmi, spmi_resource,
  7699.                         "soft-cold");
  7700.             if (chip->batt_irq[JEITA_SOFT_COLD].irq < 0) {
  7701.                 pr_err("Unable to get soft-cold irq\n");
  7702.                 rc = -EINVAL;
  7703.                 return rc;
  7704.             }
  7705.             rc = devm_request_threaded_irq(chip->dev,
  7706.                     chip->batt_irq[JEITA_SOFT_COLD].irq,
  7707.                     NULL,
  7708.                     fg_jeita_soft_cold_irq_handler,
  7709.                     IRQF_TRIGGER_RISING |
  7710.                     IRQF_TRIGGER_FALLING |
  7711.                     IRQF_ONESHOT,
  7712.                     "soft-cold", chip);
  7713.             if (rc < 0) {
  7714.                 pr_err("Can't request %d soft-cold: %d\n",
  7715.                     chip->batt_irq[JEITA_SOFT_COLD].irq,
  7716.                                 rc);
  7717.                 return rc;
  7718.             }
  7719.             disable_irq(chip->batt_irq[JEITA_SOFT_COLD].irq);
  7720.             chip->batt_irq[JEITA_SOFT_COLD].disabled = true;
  7721.             chip->batt_irq[JEITA_SOFT_HOT].irq =
  7722.                 spmi_get_irq_byname(chip->spmi, spmi_resource,
  7723.                     "soft-hot");
  7724.             if (chip->batt_irq[JEITA_SOFT_HOT].irq < 0) {
  7725.                 pr_err("Unable to get soft-hot irq\n");
  7726.                 rc = -EINVAL;
  7727.                 return rc;
  7728.             }
  7729.             rc = devm_request_threaded_irq(chip->dev,
  7730.                     chip->batt_irq[JEITA_SOFT_HOT].irq,
  7731.                     NULL,
  7732.                     fg_jeita_soft_hot_irq_handler,
  7733.                     IRQF_TRIGGER_RISING |
  7734.                     IRQF_TRIGGER_FALLING |
  7735.                     IRQF_ONESHOT,
  7736.                     "soft-hot", chip);
  7737.             if (rc < 0) {
  7738.                 pr_err("Can't request %d soft-hot: %d\n",
  7739.                     chip->batt_irq[JEITA_SOFT_HOT].irq, rc);
  7740.                 return rc;
  7741.             }
  7742.             disable_irq(chip->batt_irq[JEITA_SOFT_HOT].irq);
  7743.             chip->batt_irq[JEITA_SOFT_HOT].disabled = true;
  7744.             chip->batt_irq[BATT_MISSING].irq = spmi_get_irq_byname(
  7745.                     chip->spmi, spmi_resource,
  7746.                     "batt-missing");
  7747.             if (chip->batt_irq[BATT_MISSING].irq < 0) {
  7748.                 pr_err("Unable to get batt-missing irq\n");
  7749.                 rc = -EINVAL;
  7750.                 return rc;
  7751.             }
  7752.             rc = devm_request_threaded_irq(chip->dev,
  7753.                     chip->batt_irq[BATT_MISSING].irq,
  7754.                     NULL,
  7755.                     fg_batt_missing_irq_handler,
  7756.                     IRQF_TRIGGER_RISING |
  7757.                     IRQF_TRIGGER_FALLING |
  7758.                     IRQF_ONESHOT,
  7759.                     "batt-missing", chip);
  7760.             if (rc < 0) {
  7761.                 pr_err("Can't request %d batt-missing: %d\n",
  7762.                     chip->batt_irq[BATT_MISSING].irq, rc);
  7763.                 return rc;
  7764.             }
  7765.             chip->batt_irq[VBATT_LOW].irq = spmi_get_irq_byname(
  7766.                     chip->spmi, spmi_resource,
  7767.                     "vbatt-low");
  7768.             if (chip->batt_irq[VBATT_LOW].irq < 0) {
  7769.                 pr_err("Unable to get vbatt-low irq\n");
  7770.                 rc = -EINVAL;
  7771.                 return rc;
  7772.             }
  7773.             rc = devm_request_irq(chip->dev,
  7774.                     chip->batt_irq[VBATT_LOW].irq,
  7775.                     fg_vbatt_low_handler,
  7776.                     IRQF_TRIGGER_RISING |
  7777.                     IRQF_TRIGGER_FALLING,
  7778.                     "vbatt-low", chip);
  7779.             if (rc < 0) {
  7780.                 pr_err("Can't request %d vbatt-low: %d\n",
  7781.                     chip->batt_irq[VBATT_LOW].irq, rc);
  7782.                 return rc;
  7783.             }
  7784.             if (chip->use_vbat_low_empty_soc) {
  7785.                 enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
  7786.                 chip->vbat_low_irq_enabled = true;
  7787.             } else {
  7788.                 disable_irq_nosync(
  7789.                     chip->batt_irq[VBATT_LOW].irq);
  7790.                 chip->vbat_low_irq_enabled = false;
  7791.             }
  7792.             break;
  7793.         case FG_ADC:
  7794.             break;
  7795.         default:
  7796.             pr_err("subtype %d\n", subtype);
  7797.             return -EINVAL;
  7798.         }
  7799.     }
  7800.  
  7801.     chip->irqs_enabled = true;
  7802.     return rc;
  7803. }
  7804.  
  7805. static void fg_cancel_all_works(struct fg_chip *chip)
  7806. {
  7807.     cancel_delayed_work_sync(&chip->check_sanity_work);
  7808.     cancel_delayed_work_sync(&chip->update_sram_data);
  7809.     cancel_delayed_work_sync(&chip->update_temp_work);
  7810.     cancel_delayed_work_sync(&chip->update_jeita_setting);
  7811.     cancel_delayed_work_sync(&chip->check_empty_work);
  7812.     cancel_delayed_work_sync(&chip->batt_profile_init);
  7813.     alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
  7814.     alarm_try_to_cancel(&chip->hard_jeita_alarm);
  7815.     if (!chip->ima_error_handling)
  7816.         cancel_work_sync(&chip->ima_error_recovery_work);
  7817.     cancel_work_sync(&chip->rslow_comp_work);
  7818.     cancel_work_sync(&chip->set_resume_soc_work);
  7819.     cancel_work_sync(&chip->fg_cap_learning_work);
  7820.     cancel_work_sync(&chip->dump_sram);
  7821.     cancel_work_sync(&chip->status_change_work);
  7822.     cancel_work_sync(&chip->cycle_count_work);
  7823.     cancel_work_sync(&chip->update_esr_work);
  7824.     cancel_work_sync(&chip->sysfs_restart_work);
  7825.     cancel_work_sync(&chip->gain_comp_work);
  7826.     cancel_work_sync(&chip->init_work);
  7827.     cancel_work_sync(&chip->charge_full_work);
  7828.     cancel_work_sync(&chip->bcl_hi_power_work);
  7829.     cancel_work_sync(&chip->esr_extract_config_work);
  7830.     cancel_work_sync(&chip->slope_limiter_work);
  7831.     cancel_work_sync(&chip->dischg_gain_work);
  7832.     cancel_work_sync(&chip->cc_soc_store_work);
  7833. }
  7834.  
  7835. static void fg_cleanup(struct fg_chip *chip)
  7836. {
  7837.     fg_cancel_all_works(chip);
  7838.     power_supply_unregister(&chip->bms_psy);
  7839.     mutex_destroy(&chip->rslow_comp.lock);
  7840.     mutex_destroy(&chip->rw_lock);
  7841.     mutex_destroy(&chip->cyc_ctr.lock);
  7842.     mutex_destroy(&chip->learning_data.learning_lock);
  7843.     mutex_destroy(&chip->sysfs_restart_lock);
  7844.     mutex_destroy(&chip->ima_recovery_lock);
  7845.     wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
  7846.     wakeup_source_trash(&chip->empty_check_wakeup_source.source);
  7847.     wakeup_source_trash(&chip->memif_wakeup_source.source);
  7848.     wakeup_source_trash(&chip->profile_wakeup_source.source);
  7849.     wakeup_source_trash(&chip->update_temp_wakeup_source.source);
  7850.     wakeup_source_trash(&chip->update_sram_wakeup_source.source);
  7851.     wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
  7852.     wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
  7853.     wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
  7854.     wakeup_source_trash(&chip->slope_limit_wakeup_source.source);
  7855.     wakeup_source_trash(&chip->dischg_gain_wakeup_source.source);
  7856.     wakeup_source_trash(&chip->fg_reset_wakeup_source.source);
  7857.     wakeup_source_trash(&chip->cc_soc_wakeup_source.source);
  7858.     wakeup_source_trash(&chip->sanity_wakeup_source.source);
  7859. }
  7860.  
  7861. static int fg_remove(struct spmi_device *spmi)
  7862. {
  7863.     struct fg_chip *chip = dev_get_drvdata(&spmi->dev);
  7864.  
  7865.     fg_cleanup(chip);
  7866.     dev_set_drvdata(&spmi->dev, NULL);
  7867.     return 0;
  7868. }
  7869.  
  7870. static int fg_memif_data_open(struct inode *inode, struct file *file)
  7871. {
  7872.     struct fg_log_buffer *log;
  7873.     struct fg_trans *trans;
  7874.     u8 *data_buf;
  7875.  
  7876.     size_t logbufsize = SZ_4K;
  7877.     size_t databufsize = SZ_4K;
  7878.  
  7879.     if (!dbgfs_data.chip) {
  7880.         pr_err("Not initialized data\n");
  7881.         return -EINVAL;
  7882.     }
  7883.  
  7884.     /* Per file "transaction" data */
  7885.     trans = kzalloc(sizeof(*trans), GFP_KERNEL);
  7886.     if (!trans) {
  7887.         pr_err("Unable to allocate memory for transaction data\n");
  7888.         return -ENOMEM;
  7889.     }
  7890.  
  7891.     /* Allocate log buffer */
  7892.     log = kzalloc(logbufsize, GFP_KERNEL);
  7893.  
  7894.     if (!log) {
  7895.         kfree(trans);
  7896.         pr_err("Unable to allocate memory for log buffer\n");
  7897.         return -ENOMEM;
  7898.     }
  7899.  
  7900.     log->rpos = 0;
  7901.     log->wpos = 0;
  7902.     log->len = logbufsize - sizeof(*log);
  7903.  
  7904.     /* Allocate data buffer */
  7905.     data_buf = kzalloc(databufsize, GFP_KERNEL);
  7906.  
  7907.     if (!data_buf) {
  7908.         kfree(trans);
  7909.         kfree(log);
  7910.         pr_err("Unable to allocate memory for data buffer\n");
  7911.         return -ENOMEM;
  7912.     }
  7913.  
  7914.     trans->log = log;
  7915.     trans->data = data_buf;
  7916.     trans->cnt = dbgfs_data.cnt;
  7917.     trans->addr = dbgfs_data.addr;
  7918.     trans->chip = dbgfs_data.chip;
  7919.     trans->offset = trans->addr;
  7920.     mutex_init(&trans->memif_dfs_lock);
  7921.  
  7922.     file->private_data = trans;
  7923.     return 0;
  7924. }
  7925.  
  7926. static int fg_memif_dfs_close(struct inode *inode, struct file *file)
  7927. {
  7928.     struct fg_trans *trans = file->private_data;
  7929.  
  7930.     if (trans && trans->log && trans->data) {
  7931.         file->private_data = NULL;
  7932.         mutex_destroy(&trans->memif_dfs_lock);
  7933.         kfree(trans->log);
  7934.         kfree(trans->data);
  7935.         kfree(trans);
  7936.     }
  7937.  
  7938.     return 0;
  7939. }
  7940.  
  7941. /**
  7942.  * print_to_log: format a string and place into the log buffer
  7943.  * @log: The log buffer to place the result into.
  7944.  * @fmt: The format string to use.
  7945.  * @...: The arguments for the format string.
  7946.  *
  7947.  * The return value is the number of characters written to @log buffer
  7948.  * not including the trailing '\0'.
  7949.  */
  7950. static int print_to_log(struct fg_log_buffer *log, const char *fmt, ...)
  7951. {
  7952.     va_list args;
  7953.     int cnt;
  7954.     char *buf = &log->data[log->wpos];
  7955.     size_t size = log->len - log->wpos;
  7956.  
  7957.     va_start(args, fmt);
  7958.     cnt = vscnprintf(buf, size, fmt, args);
  7959.     va_end(args);
  7960.  
  7961.     log->wpos += cnt;
  7962.     return cnt;
  7963. }
  7964.  
  7965. /**
  7966.  * write_next_line_to_log: Writes a single "line" of data into the log buffer
  7967.  * @trans: Pointer to SRAM transaction data.
  7968.  * @offset: SRAM address offset to start reading from.
  7969.  * @pcnt: Pointer to 'cnt' variable.  Indicates the number of bytes to read.
  7970.  *
  7971.  * The 'offset' is a 12-bit SRAM address.
  7972.  *
  7973.  * On a successful read, the pcnt is decremented by the number of data
  7974.  * bytes read from the SRAM.  When the cnt reaches 0, all requested bytes have
  7975.  * been read.
  7976.  */
static int
write_next_line_to_log(struct fg_trans *trans, int offset, size_t *pcnt)
{
    int i, j;
    u8 data[ITEMS_PER_LINE];
    struct fg_log_buffer *log = trans->log;

    int cnt = 0;
    /* How far 'offset' sits past the previous ITEMS_PER_LINE boundary */
    int padding = offset % ITEMS_PER_LINE;
    /* Bytes actually consumed from the SRAM snapshot on this line */
    int items_to_read = min(ARRAY_SIZE(data) - padding, *pcnt);
    /* Final column (exclusive) on this line: padding plus data items */
    int items_to_log = min(ITEMS_PER_LINE, padding + items_to_read);

    /* Buffer needs enough space for an entire line */
    if ((log->len - log->wpos) < MAX_LINE_LENGTH)
        goto done;

    /* trans->data holds the SRAM dump starting at trans->addr */
    memcpy(data, trans->data + (offset - trans->addr), items_to_read);

    *pcnt -= items_to_read;

    /* Each line starts with the aligned offset (12-bit address) */
    cnt = print_to_log(log, "%3.3X ", offset & 0xfff);
    if (cnt == 0)
        goto done;

    /* If the offset is unaligned, add padding to right justify items */
    for (i = 0; i < padding; ++i) {
        cnt = print_to_log(log, "-- ");
        if (cnt == 0)
            goto done;
    }

    /* Log the data items; note 'i' carries over from the padding loop
     * so the column count continues from where the padding stopped.
     */
    for (j = 0; i < items_to_log; ++i, ++j) {
        cnt = print_to_log(log, "%2.2X ", data[j]);
        if (cnt == 0)
            goto done;
    }

    /* If the last character was a space, then replace it with a newline */
    if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
        log->data[log->wpos - 1] = '\n';

done:
    /* 0 means the log buffer is full; caller stops filling lines */
    return cnt;
}
  8023.  
  8024. /**
  8025.  * get_log_data - reads data from SRAM and saves to the log buffer
  8026.  * @trans: Pointer to SRAM transaction data.
  8027.  *
  8028.  * Returns the number of "items" read or SPMI error code for read failures.
  8029.  */
  8030. static int get_log_data(struct fg_trans *trans)
  8031. {
  8032.     int cnt, rc;
  8033.     int last_cnt;
  8034.     int items_read;
  8035.     int total_items_read = 0;
  8036.     u32 offset = trans->offset;
  8037.     size_t item_cnt = trans->cnt;
  8038.     struct fg_log_buffer *log = trans->log;
  8039.  
  8040.     if (item_cnt == 0)
  8041.         return 0;
  8042.  
  8043.     if (item_cnt > SZ_4K) {
  8044.         pr_err("Reading too many bytes\n");
  8045.         return -EINVAL;
  8046.     }
  8047.  
  8048.     rc = fg_mem_read(trans->chip, trans->data,
  8049.             trans->addr, trans->cnt, 0, 0);
  8050.     if (rc) {
  8051.         pr_err("dump failed: rc = %d\n", rc);
  8052.         return rc;
  8053.     }
  8054.     /* Reset the log buffer 'pointers' */
  8055.     log->wpos = log->rpos = 0;
  8056.  
  8057.     /* Keep reading data until the log is full */
  8058.     do {
  8059.         last_cnt = item_cnt;
  8060.         cnt = write_next_line_to_log(trans, offset, &item_cnt);
  8061.         items_read = last_cnt - item_cnt;
  8062.         offset += items_read;
  8063.         total_items_read += items_read;
  8064.     } while (cnt && item_cnt > 0);
  8065.  
  8066.     /* Adjust the transaction offset and count */
  8067.     trans->cnt = item_cnt;
  8068.     trans->offset += total_items_read;
  8069.  
  8070.     return total_items_read;
  8071. }
  8072.  
  8073. /**
 * fg_memif_dfs_reg_read: reads value(s) from SRAM and fills the user's buffer
 *  with a byte array (coded as string)
  8076.  * @file: file pointer
  8077.  * @buf: where to put the result
  8078.  * @count: maximum space available in @buf
  8079.  * @ppos: starting position
  8080.  * @return number of user bytes read, or negative error value
  8081.  */
  8082. static ssize_t fg_memif_dfs_reg_read(struct file *file, char __user *buf,
  8083.     size_t count, loff_t *ppos)
  8084. {
  8085.     struct fg_trans *trans = file->private_data;
  8086.     struct fg_log_buffer *log = trans->log;
  8087.     size_t ret;
  8088.     size_t len;
  8089.  
  8090.     mutex_lock(&trans->memif_dfs_lock);
  8091.     /* Is the the log buffer empty */
  8092.     if (log->rpos >= log->wpos) {
  8093.         if (get_log_data(trans) <= 0) {
  8094.             len = 0;
  8095.             goto unlock_mutex;
  8096.         }
  8097.     }
  8098.  
  8099.     len = min(count, log->wpos - log->rpos);
  8100.  
  8101.     ret = copy_to_user(buf, &log->data[log->rpos], len);
  8102.     if (ret == len) {
  8103.         pr_err("error copy sram register values to user\n");
  8104.         len = -EFAULT;
  8105.         goto unlock_mutex;
  8106.     }
  8107.  
  8108.     /* 'ret' is the number of bytes not copied */
  8109.     len -= ret;
  8110.  
  8111.     *ppos += len;
  8112.     log->rpos += len;
  8113.  
  8114. unlock_mutex:
  8115.     mutex_unlock(&trans->memif_dfs_lock);
  8116.     return len;
  8117. }
  8118.  
  8119. /**
  8120.  * fg_memif_dfs_reg_write: write user's byte array (coded as string) to SRAM.
  8121.  * @file: file pointer
  8122.  * @buf: user data to be written.
  8123.  * @count: maximum space available in @buf
  8124.  * @ppos: starting position
 * @return number of user bytes written, or negative error value
  8126.  */
  8127. static ssize_t fg_memif_dfs_reg_write(struct file *file, const char __user *buf,
  8128.             size_t count, loff_t *ppos)
  8129. {
  8130.     int bytes_read;
  8131.     int data;
  8132.     int pos = 0;
  8133.     int cnt = 0;
  8134.     u8  *values;
  8135.     size_t ret = 0;
  8136.     char *kbuf;
  8137.     u32 offset;
  8138.  
  8139.     struct fg_trans *trans = file->private_data;
  8140.  
  8141.     mutex_lock(&trans->memif_dfs_lock);
  8142.     offset = trans->offset;
  8143.  
  8144.     /* Make a copy of the user data */
  8145.     kbuf = kmalloc(count + 1, GFP_KERNEL);
  8146.     if (!kbuf) {
  8147.         ret = -ENOMEM;
  8148.         goto unlock_mutex;
  8149.     }
  8150.  
  8151.     ret = copy_from_user(kbuf, buf, count);
  8152.     if (ret == count) {
  8153.         pr_err("failed to copy data from user\n");
  8154.         ret = -EFAULT;
  8155.         goto free_buf;
  8156.     }
  8157.  
  8158.     count -= ret;
  8159.     *ppos += count;
  8160.     kbuf[count] = '\0';
  8161.  
  8162.     /* Override the text buffer with the raw data */
  8163.     values = kbuf;
  8164.  
  8165.     /* Parse the data in the buffer.  It should be a string of numbers */
  8166.     while ((pos < count) &&
  8167.         sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) {
  8168.         /*
  8169.          * We shouldn't be receiving a string of characters that
  8170.          * exceeds a size of 5 to keep this functionally correct.
  8171.          * Also, we should make sure that pos never gets overflowed
  8172.          * beyond the limit.
  8173.          */
  8174.         if (bytes_read > 5 || bytes_read > INT_MAX - pos) {
  8175.             cnt = 0;
  8176.             ret = -EINVAL;
  8177.             break;
  8178.         }
  8179.         pos += bytes_read;
  8180.         values[cnt++] = data & 0xff;
  8181.     }
  8182.  
  8183.     if (!cnt)
  8184.         goto free_buf;
  8185.  
  8186.     pr_info("address %x, count %d\n", offset, cnt);
  8187.     /* Perform the write(s) */
  8188.  
  8189.     ret = fg_mem_write(trans->chip, values, offset,
  8190.                 cnt, 0, 0);
  8191.     if (ret) {
  8192.         pr_err("SPMI write failed, err = %zu\n", ret);
  8193.     } else {
  8194.         ret = count;
  8195.         trans->offset += cnt > 4 ? 4 : cnt;
  8196.     }
  8197.  
  8198. free_buf:
  8199.     kfree(kbuf);
  8200. unlock_mutex:
  8201.     mutex_unlock(&trans->memif_dfs_lock);
  8202.     return ret;
  8203. }
  8204.  
/* debugfs "data" node: SRAM read/write through a per-open fg_trans */
static const struct file_operations fg_memif_dfs_reg_fops = {
    .open       = fg_memif_data_open,
    .release    = fg_memif_dfs_close,
    .read       = fg_memif_dfs_reg_read,
    .write      = fg_memif_dfs_reg_write,
};
  8211.  
  8212. #define ADDR_OF_FG_REGS_START   0x400
  8213. #define COUNT_OF_ALL_FG_REGS    0x200
  8214. static int fg_regs_open(struct inode *inode, struct file *file)
  8215. {
  8216.     struct fg_log_buffer *log;
  8217.     struct fg_trans *trans;
  8218.     u8 *data_buf;
  8219.     size_t logbufsize = SZ_4K;
  8220.     size_t databufsize = SZ_4K;
  8221.     if (!dbgfs_data.chip) {
  8222.         pr_err("Not initialized data\n");
  8223.         return -EINVAL;
  8224.     }
  8225.     /* Per file "transaction" data */
  8226.     trans = kzalloc(sizeof(*trans), GFP_KERNEL);
  8227.     if (!trans) {
  8228.         pr_err("Unable to allocate memory for transaction data\n");
  8229.         return -ENOMEM;
  8230.     }
  8231.     /* Allocate log buffer */
  8232.     log = kzalloc(logbufsize, GFP_KERNEL);
  8233.     if (!log) {
  8234.         kfree(trans);
  8235.         pr_err("Unable to allocate memory for log buffer\n");
  8236.         return -ENOMEM;
  8237.     }
  8238.     log->rpos = 0;
  8239.     log->wpos = 0;
  8240.     log->len = logbufsize - sizeof(*log);
  8241.     /* Allocate data buffer */
  8242.     data_buf = kzalloc(databufsize, GFP_KERNEL);
  8243.     if (!data_buf) {
  8244.         kfree(trans);
  8245.         kfree(log);
  8246.         pr_err("Unable to allocate memory for data buffer\n");
  8247.         return -ENOMEM;
  8248.     }
  8249.     trans->log      = log;
  8250.     trans->data     = data_buf;
  8251.     trans->cnt      = COUNT_OF_ALL_FG_REGS;
  8252.     trans->addr     = ADDR_OF_FG_REGS_START;
  8253.     trans->chip     = dbgfs_data.chip;
  8254.     trans->offset   = trans->addr;
  8255.     file->private_data = trans;
  8256.     return 0;
  8257. }
  8258. static ssize_t fg_regs_write(struct file *file, const char __user *buf,
  8259.             size_t count, loff_t *ppos)
  8260. {
  8261.     /* TODO: OEM didn't finish this code? */
  8262.     return 0;
  8263. }
/* debugfs "fg_regs" node: dump of the entire FG register range */
static const struct file_operations fg_regs_sys_ops = {
    .open       = fg_regs_open,
    .release    = fg_memif_dfs_close,
    .read       = fg_memif_dfs_reg_read,
    .write      = fg_regs_write,
};
  8270.  
  8271. /**
  8272.  * fg_dfs_create_fs: create debugfs file system.
  8273.  * @return pointer to root directory or NULL if failed to create fs
  8274.  */
  8275. static struct dentry *fg_dfs_create_fs(void)
  8276. {
  8277.     struct dentry *root, *file;
  8278.  
  8279.     pr_debug("Creating FG_MEM debugfs file-system\n");
  8280.     root = debugfs_create_dir(DFS_ROOT_NAME, NULL);
  8281.     if (IS_ERR_OR_NULL(root)) {
  8282.         pr_err("Error creating top level directory err:%ld",
  8283.             (long)root);
  8284.         if (PTR_ERR(root) == -ENODEV)
  8285.             pr_err("debugfs is not enabled in the kernel");
  8286.         return NULL;
  8287.     }
  8288.  
  8289.     dbgfs_data.help_msg.size = strlen(dbgfs_data.help_msg.data);
  8290.  
  8291.     file = debugfs_create_blob("help", S_IRUGO, root, &dbgfs_data.help_msg);
  8292.     if (!file) {
  8293.         pr_err("error creating help entry\n");
  8294.         goto err_remove_fs;
  8295.     }
  8296.     return root;
  8297.  
  8298. err_remove_fs:
  8299.     debugfs_remove_recursive(root);
  8300.     return NULL;
  8301. }
  8302.  
  8303. /**
  8304.  * fg_dfs_get_root: return a pointer to FG debugfs root directory.
  8305.  * @return a pointer to the existing directory, or if no root
  8306.  * directory exists then create one. Directory is created with file that
  8307.  * configures SRAM transaction, namely: address, and count.
  8308.  * @returns valid pointer on success or NULL
  8309.  */
  8310. struct dentry *fg_dfs_get_root(void)
  8311. {
  8312.     if (dbgfs_data.root)
  8313.         return dbgfs_data.root;
  8314.  
  8315.     if (mutex_lock_interruptible(&dbgfs_data.lock) < 0)
  8316.         return NULL;
  8317.     /* critical section */
  8318.     if (!dbgfs_data.root) { /* double checking idiom */
  8319.         dbgfs_data.root = fg_dfs_create_fs();
  8320.     }
  8321.     mutex_unlock(&dbgfs_data.lock);
  8322.     return dbgfs_data.root;
  8323. }
  8324.  
  8325. /*
  8326.  * fg_dfs_create: adds new fg_mem if debugfs entry
  8327.  * @return zero on success
  8328.  */
  8329. int fg_dfs_create(struct fg_chip *chip)
  8330. {
  8331.     struct dentry *root;
  8332.     struct dentry *file;
  8333.  
  8334.     root = fg_dfs_get_root();
  8335.     if (!root)
  8336.         return -ENOENT;
  8337.  
  8338.     dbgfs_data.chip = chip;
  8339.  
  8340.     file = debugfs_create_u32("count", DFS_MODE, root, &(dbgfs_data.cnt));
  8341.     if (!file) {
  8342.         pr_err("error creating 'count' entry\n");
  8343.         goto err_remove_fs;
  8344.     }
  8345.  
  8346.     file = debugfs_create_x32("address", DFS_MODE,
  8347.             root, &(dbgfs_data.addr));
  8348.     if (!file) {
  8349.         pr_err("error creating 'address' entry\n");
  8350.         goto err_remove_fs;
  8351.     }
  8352.  
  8353.     file = debugfs_create_file("data", DFS_MODE, root, &dbgfs_data,
  8354.                             &fg_memif_dfs_reg_fops);
  8355.     if (!file) {
  8356.         pr_err("error creating 'data' entry\n");
  8357.         goto err_remove_fs;
  8358.     }
  8359.  
  8360.     /* create interface for dump all fg_regs. */
  8361.     file = debugfs_create_file("fg_regs", S_IRUGO | S_IWUSR, root, chip,
  8362.                             &fg_regs_sys_ops);
  8363.     if (!file) {
  8364.         pr_err("error creating 'fg_regs' entry\n");
  8365.         goto err_remove_fs;
  8366.     }
  8367.  
  8368.     return 0;
  8369.  
  8370. err_remove_fs:
  8371.     debugfs_remove_recursive(root);
  8372.     return -ENOMEM;
  8373. }
  8374.  
  8375. #define EXTERNAL_SENSE_OFFSET_REG   0x41C
  8376. #define EXT_OFFSET_TRIM_REG     0xF8
  8377. #define SEC_ACCESS_REG          0xD0
  8378. #define SEC_ACCESS_UNLOCK       0xA5
  8379. #define BCL_TRIM_REV_FIXED      12
  8380. static int bcl_trim_workaround(struct fg_chip *chip)
  8381. {
  8382.     u8 reg, rc;
  8383.  
  8384.     if (chip->tp_rev_addr == 0)
  8385.         return 0;
  8386.  
  8387.     rc = fg_read(chip, &reg, chip->tp_rev_addr, 1);
  8388.     if (rc) {
  8389.         pr_err("Failed to read tp reg, rc = %d\n", rc);
  8390.         return rc;
  8391.     }
  8392.     if (reg >= BCL_TRIM_REV_FIXED) {
  8393.         if (fg_debug_mask & FG_STATUS)
  8394.             pr_info("workaround not applied, tp_rev = %d\n", reg);
  8395.         return 0;
  8396.     }
  8397.  
  8398.     rc = fg_mem_read(chip, &reg, EXTERNAL_SENSE_OFFSET_REG, 1, 2, 0);
  8399.     if (rc) {
  8400.         pr_err("Failed to read ext sense offset trim, rc = %d\n", rc);
  8401.         return rc;
  8402.     }
  8403.     rc = fg_masked_write(chip, chip->soc_base + SEC_ACCESS_REG,
  8404.             SEC_ACCESS_UNLOCK, SEC_ACCESS_UNLOCK, 1);
  8405.  
  8406.     rc |= fg_masked_write(chip, chip->soc_base + EXT_OFFSET_TRIM_REG,
  8407.             0xFF, reg, 1);
  8408.     if (rc) {
  8409.         pr_err("Failed to write ext sense offset trim, rc = %d\n", rc);
  8410.         return rc;
  8411.     }
  8412.  
  8413.     return 0;
  8414. }
  8415.  
#define KI_COEFF_PRED_FULL_ADDR     0x408
#define TEMP_FRAC_SHIFT_REG     0x4A4
#define FG_ADC_CONFIG_REG       0x4B8
#define KI_COEFF_PRED_FULL_4_0_MSB  0x88
#define KI_COEFF_PRED_FULL_4_0_LSB  0x00
#define FG_BCL_CONFIG_OFFSET        0x3
#define ALERT_CFG_OFFSET        3
#define I_TERM_QUAL_BIT         BIT(1)
#define PATCH_NEG_CURRENT_BIT       BIT(3)
#define BCL_FORCED_HPM_IN_CHARGE    BIT(2)
#define IRQ_USE_VOLTAGE_HYST_BIT    BIT(0)
#define EMPTY_FROM_VOLTAGE_BIT      BIT(1)
#define EMPTY_FROM_SOC_BIT      BIT(2)
#define EMPTY_SOC_IRQ_MASK      (IRQ_USE_VOLTAGE_HYST_BIT | \
                    EMPTY_FROM_SOC_BIT | \
                    EMPTY_FROM_VOLTAGE_BIT)
/*
 * fg_common_hw_init: SRAM/IRQ configuration common to all supported PMICs.
 * Pushes the DT-derived settings[] values (iterm, cutoff voltage, BCL
 * thresholds, resume SOC, delta SOC, vbatt-low, therm delay, thermal
 * coefficients) into FG SRAM, caches ESR/rslow state flags on @chip, and
 * re-arms the empty-SOC interrupt configuration.
 * Returns 0 on success or a negative error code from the first failing
 * SRAM write/read. The statement order mirrors the required hardware
 * programming sequence — do not reorder.
 */
static int fg_common_hw_init(struct fg_chip *chip)
{
    int rc;
    int resume_soc_raw;
    u8 val;

    /* push DT-configured thresholds into SRAM first */
    update_iterm(chip);
    update_cutoff_voltage(chip);
    update_bcl_thresholds(chip);
    if (!chip->use_vbat_low_empty_soc)
        update_irq_volt_empty(chip);

    resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
    if (resume_soc_raw > 0) {
        rc = fg_set_resume_soc(chip, resume_soc_raw);
        if (rc) {
            pr_err("Couldn't set resume SOC for FG\n");
            return rc;
        }
    } else {
        pr_info("FG auto recharge threshold not specified in DT\n");
    }

    /* fg_sense_type < 0 means "not overridden via module param" */
    if (fg_sense_type >= 0) {
        rc = set_prop_sense_type(chip, fg_sense_type);
        if (rc) {
            pr_err("failed to config sense type %d rc=%d\n",
                    fg_sense_type, rc);
            return rc;
        }
    }

    rc = fg_mem_masked_write(chip, settings[FG_MEM_DELTA_SOC].address, 0xFF,
            settings[FG_MEM_DELTA_SOC].value,
            settings[FG_MEM_DELTA_SOC].offset);
    if (rc) {
        pr_err("failed to write delta soc rc=%d\n", rc);
        return rc;
    }

    /* Override the voltage threshold for vbatt_low with empty_volt */
    if (chip->use_vbat_low_empty_soc)
        settings[FG_MEM_BATT_LOW].value =
            settings[FG_MEM_IRQ_VOLT_EMPTY].value;

    rc = fg_mem_masked_write(chip, settings[FG_MEM_BATT_LOW].address, 0xFF,
            batt_to_setpoint_8b(settings[FG_MEM_BATT_LOW].value),
            settings[FG_MEM_BATT_LOW].offset);
    if (rc) {
        pr_err("failed to write Vbatt_low rc=%d\n", rc);
        return rc;
    }

    rc = fg_mem_masked_write(chip, settings[FG_MEM_THERM_DELAY].address,
        THERM_DELAY_MASK,
        therm_delay_to_setpoint(settings[FG_MEM_THERM_DELAY].value),
        settings[FG_MEM_THERM_DELAY].offset);
    if (rc) {
        pr_err("failed to write therm_delay rc=%d\n", rc);
        return rc;
    }

    /* NOTE(review): return value deliberately ignored here — a failed
     * coefficient write is non-fatal; confirm this is intended. */
    if (chip->use_thermal_coefficients) {
        fg_mem_write(chip, chip->thermal_coefficients,
            THERMAL_COEFF_ADDR, THERMAL_COEFF_N_BYTES,
            THERMAL_COEFF_OFFSET, 0);
    }

    /* without SW rbias control, keep battery-temp sensing always on */
    if (!chip->sw_rbias_ctrl) {
        rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
                BATT_TEMP_CNTRL_MASK,
                TEMP_SENSE_ALWAYS_BIT,
                BATT_TEMP_OFFSET);
        if (rc) {
            pr_err("failed to write BATT_TEMP_OFFSET rc=%d\n", rc);
            return rc;
        }
    }

    /* Read the cycle counter back from FG SRAM */
    if (chip->cyc_ctr.en)
        restore_cycle_counter(chip);

    if (chip->esr_pulse_tune_en) {
        rc = fg_mem_read(chip, &val, SYS_CFG_1_REG, 1, SYS_CFG_1_OFFSET,
                0);
        if (rc) {
            pr_err("unable to read sys_cfg_1: %d\n", rc);
            return rc;
        }

        /* cache whether ESR pulse extraction is currently disabled */
        if (!(val & ENABLE_ESR_PULSE_VAL))
            chip->esr_extract_disabled = true;

        if (fg_debug_mask & FG_STATUS)
            pr_info("ESR extract is %sabled\n",
                chip->esr_extract_disabled ? "dis" : "en");

        rc = fg_mem_read(chip, &val, CBITS_INPUT_FILTER_REG, 1,
                CBITS_RMEAS1_OFFSET, 0);
        if (rc) {
            pr_err("unable to read cbits_input_filter_reg: %d\n",
                rc);
            return rc;
        }

        /* cache the "slow impedance pulse" state for later toggling */
        if (val & (IMPTR_FAST_TIME_SHIFT | IMPTR_LONG_TIME_SHIFT))
            chip->imptr_pulse_slow_en = true;

        if (fg_debug_mask & FG_STATUS)
            pr_info("imptr_pulse_slow is %sabled\n",
                chip->imptr_pulse_slow_en ? "en" : "dis");
    }

    rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET,
            0);
    if (rc) {
        pr_err("unable to read rslow cfg: %d\n", rc);
        return rc;
    }

    /* cache whether rslow compensation is already active in hw */
    if (val & RSLOW_CFG_ON_VAL)
        chip->rslow_comp.active = true;

    if (fg_debug_mask & FG_STATUS)
        pr_info("rslow_comp active is %sabled\n",
            chip->rslow_comp.active ? "en" : "dis");

    rc = fg_check_system_config(chip);
    if (rc) {
        pr_err("Failed to check system config rc=%d\n", rc);
        return rc;
    }

    /*
     * Clear bits 0-2 in 0x4B3 and set them again to make empty_soc irq
     * trigger again.
     */
    rc = fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, EMPTY_SOC_IRQ_MASK,
            0, ALERT_CFG_OFFSET);
    if (rc) {
        pr_err("failed to write to 0x4B3 rc=%d\n", rc);
        return rc;
    }

    /* Wait for a FG cycle before enabling empty soc irq configuration */
    msleep(FG_CYCLE_MS);

    rc = fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, EMPTY_SOC_IRQ_MASK,
            EMPTY_SOC_IRQ_MASK, ALERT_CFG_OFFSET);
    if (rc) {
        pr_err("failed to write to 0x4B3 rc=%d\n", rc);
        return rc;
    }

    return 0;
}
  8589.  
  8590. static int fg_8994_hw_init(struct fg_chip *chip)
  8591. {
  8592.     int rc = 0;
  8593.     u8 data[4];
  8594.     u64 esr_value;
  8595.  
  8596.     rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
  8597.             PATCH_NEG_CURRENT_BIT,
  8598.             PATCH_NEG_CURRENT_BIT,
  8599.             EXTERNAL_SENSE_OFFSET);
  8600.     if (rc) {
  8601.         pr_err("failed to write patch current bit rc=%d\n", rc);
  8602.         return rc;
  8603.     }
  8604.  
  8605.     rc = bcl_trim_workaround(chip);
  8606.     if (rc) {
  8607.         pr_err("failed to redo bcl trim rc=%d\n", rc);
  8608.         return rc;
  8609.     }
  8610.  
  8611.     rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
  8612.             BCL_FORCED_HPM_IN_CHARGE,
  8613.             BCL_FORCED_HPM_IN_CHARGE,
  8614.             FG_BCL_CONFIG_OFFSET);
  8615.     if (rc) {
  8616.         pr_err("failed to force hpm in charge rc=%d\n", rc);
  8617.         return rc;
  8618.     }
  8619.  
  8620.     fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, I_TERM_QUAL_BIT, 0, 0);
  8621.  
  8622.     data[0] = 0xA2;
  8623.     data[1] = 0x12;
  8624.  
  8625.     rc = fg_mem_write(chip, data, TEMP_FRAC_SHIFT_REG, 2, 2, 0);
  8626.     if (rc) {
  8627.         pr_err("failed to write temp ocv constants rc=%d\n", rc);
  8628.         return rc;
  8629.     }
  8630.  
  8631.     data[0] = KI_COEFF_PRED_FULL_4_0_LSB;
  8632.     data[1] = KI_COEFF_PRED_FULL_4_0_MSB;
  8633.     fg_mem_write(chip, data, KI_COEFF_PRED_FULL_ADDR, 2, 2, 0);
  8634.  
  8635.     esr_value = ESR_DEFAULT_VALUE;
  8636.     rc = fg_mem_write(chip, (u8 *)&esr_value, MAXRSCHANGE_REG, 8,
  8637.             ESR_VALUE_OFFSET, 0);
  8638.     if (rc)
  8639.         pr_err("failed to write default ESR value rc=%d\n", rc);
  8640.     else
  8641.         pr_debug("set default value to esr filter\n");
  8642.  
  8643.     return 0;
  8644. }
  8645.  
  8646. #define FG_USBID_CONFIG_OFFSET      0x2
  8647. #define DISABLE_USBID_DETECT_BIT    BIT(0)
  8648. static int fg_8996_hw_init(struct fg_chip *chip)
  8649. {
  8650.     int rc;
  8651.  
  8652.     rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
  8653.             BCL_FORCED_HPM_IN_CHARGE,
  8654.             BCL_FORCED_HPM_IN_CHARGE,
  8655.             FG_BCL_CONFIG_OFFSET);
  8656.     if (rc) {
  8657.         pr_err("failed to force hpm in charge rc=%d\n", rc);
  8658.         return rc;
  8659.     }
  8660.  
  8661.     /* enable usbid conversions for PMi8996 V1.0 */
  8662.     if (chip->pmic_revision[REVID_DIG_MAJOR] == 1
  8663.             && chip->pmic_revision[REVID_ANA_MAJOR] == 0) {
  8664.         rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
  8665.                 DISABLE_USBID_DETECT_BIT,
  8666.                 0, FG_USBID_CONFIG_OFFSET);
  8667.         if (rc) {
  8668.             pr_err("failed to enable usbid conversions: %d\n", rc);
  8669.             return rc;
  8670.         }
  8671.     }
  8672.  
  8673.     return rc;
  8674. }
  8675.  
  8676. static int fg_8950_hw_init(struct fg_chip *chip)
  8677. {
  8678.     int rc;
  8679.  
  8680.     rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
  8681.             BCL_FORCED_HPM_IN_CHARGE,
  8682.             BCL_FORCED_HPM_IN_CHARGE,
  8683.             FG_BCL_CONFIG_OFFSET);
  8684.     if (rc)
  8685.         pr_err("failed to force hpm in charge rc=%d\n", rc);
  8686.  
  8687.     return rc;
  8688. }
  8689.  
  8690. static int fg_hw_init(struct fg_chip *chip)
  8691. {
  8692.     int rc = 0;
  8693.  
  8694.     rc = fg_common_hw_init(chip);
  8695.     if (rc) {
  8696.         pr_err("Unable to initialize FG HW rc=%d\n", rc);
  8697.         return rc;
  8698.     }
  8699.  
  8700.     /* add PMIC specific hw init */
  8701.     switch (chip->pmic_subtype) {
  8702.     case PMI8994:
  8703.         rc = fg_8994_hw_init(chip);
  8704.         chip->wa_flag |= PULSE_REQUEST_WA;
  8705.         break;
  8706.     case PMI8996:
  8707.         rc = fg_8996_hw_init(chip);
  8708.         /* Setup workaround flag based on PMIC type */
  8709.         if (fg_sense_type == INTERNAL_CURRENT_SENSE)
  8710.             chip->wa_flag |= IADC_GAIN_COMP_WA;
  8711.         if (chip->pmic_revision[REVID_DIG_MAJOR] >= 1)
  8712.             chip->wa_flag |= USE_CC_SOC_REG;
  8713.  
  8714.         break;
  8715.     case PMI8950:
  8716.     case PMI8937:
  8717.     case PMI8940:
  8718.         rc = fg_8950_hw_init(chip);
  8719.         /* Setup workaround flag based on PMIC type */
  8720.         chip->wa_flag |= BCL_HI_POWER_FOR_CHGLED_WA;
  8721.         if (fg_sense_type == INTERNAL_CURRENT_SENSE)
  8722.             chip->wa_flag |= IADC_GAIN_COMP_WA;
  8723.         if (chip->pmic_revision[REVID_DIG_MAJOR] > 1)
  8724.             chip->wa_flag |= USE_CC_SOC_REG;
  8725.  
  8726.         break;
  8727.     }
  8728.     if (rc)
  8729.         pr_err("Unable to initialize PMIC specific FG HW rc=%d\n", rc);
  8730.  
  8731.     pr_debug("wa_flag=0x%x\n", chip->wa_flag);
  8732.  
  8733.     return rc;
  8734. }
  8735.  
  8736. static int fg_init_iadc_config(struct fg_chip *chip)
  8737. {
  8738.     u8 reg[2];
  8739.     int rc;
  8740.  
  8741.     /* read default gain config */
  8742.     rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0);
  8743.     if (rc) {
  8744.         pr_err("Failed to read default gain rc=%d\n", rc);
  8745.         return rc;
  8746.     }
  8747.  
  8748.     if (reg[1] || reg[0]) {
  8749.         /*
  8750.          * Default gain register has valid value:
  8751.          * - write to gain register.
  8752.          */
  8753.         rc = fg_mem_write(chip, reg, GAIN_REG, 2,
  8754.                         GAIN_OFFSET, 0);
  8755.         if (rc) {
  8756.             pr_err("Failed to write gain rc=%d\n", rc);
  8757.             return rc;
  8758.         }
  8759.     } else {
  8760.         /*
  8761.          * Default gain register is invalid:
  8762.          * - read gain register for default gain value
  8763.          * - write to default gain register.
  8764.          */
  8765.         rc = fg_mem_read(chip, reg, GAIN_REG, 2,
  8766.                         GAIN_OFFSET, 0);
  8767.         if (rc) {
  8768.             pr_err("Failed to read gain rc=%d\n", rc);
  8769.             return rc;
  8770.         }
  8771.         rc = fg_mem_write(chip, reg, K_VCOR_REG, 2,
  8772.                         DEF_GAIN_OFFSET, 0);
  8773.         if (rc) {
  8774.             pr_err("Failed to write default gain rc=%d\n",
  8775.                                 rc);
  8776.             return rc;
  8777.         }
  8778.     }
  8779.  
  8780.     chip->iadc_comp_data.dfl_gain_reg[0] = reg[0];
  8781.     chip->iadc_comp_data.dfl_gain_reg[1] = reg[1];
  8782.     chip->iadc_comp_data.dfl_gain = half_float(reg);
  8783.  
  8784.     pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n",
  8785.                reg[1], reg[0], chip->iadc_comp_data.dfl_gain);
  8786.     return 0;
  8787. }
  8788.  
#define EN_WR_FGXCT_PRD     BIT(6)
#define EN_RD_FGXCT_PRD     BIT(5)
#define FG_RESTART_TIMEOUT_MS   12000
/*
 * ima_error_recovery_work: recover from an IMA (interleaved memory access)
 * error by force-acquiring and releasing IMA, resetting the FG block,
 * re-running hw init and reloading the battery profile, then restoring
 * SOC/CC-SOC state and re-enabling IRQs.
 *
 * Locking/ordering is deliberate and fragile:
 *  - holds ima_recovery_lock except across fg_cancel_all_works() (released
 *    there to avoid deadlock with the works being cancelled);
 *  - blocks SRAM access (block_sram_access) for the whole reset window;
 *  - the "wait" label retries wait_for_completion once on -ERESTARTSYS.
 * Do not reorder the steps.
 */
static void ima_error_recovery_work(struct work_struct *work)
{
    struct fg_chip *chip = container_of(work,
                struct fg_chip,
                ima_error_recovery_work);
    bool tried_again = false;
    int rc;
    u8 buf[4] = {0, 0, 0, 0};

    /* keep the device awake for the entire recovery sequence */
    fg_stay_awake(&chip->fg_reset_wakeup_source);
    mutex_lock(&chip->ima_recovery_lock);
    if (!chip->ima_error_handling) {
        pr_err("Scheduled by mistake?\n");
        mutex_unlock(&chip->ima_recovery_lock);
        fg_relax(&chip->fg_reset_wakeup_source);
        return;
    }

    /*
     * SOC should be read and used until the error recovery completes.
     * Without this, there could be a fluctuation in SOC values notified
     * to the userspace.
     */
    chip->use_last_soc = true;

    /* Block SRAM access till FG reset is complete */
    chip->block_sram_access = true;

    /* Release the mutex to avoid deadlock while cancelling the works */
    mutex_unlock(&chip->ima_recovery_lock);

    /* Cancel all the works */
    fg_cancel_all_works(chip);

    if (fg_debug_mask & FG_STATUS)
        pr_info("last_soc: %d\n", chip->last_soc);

    mutex_lock(&chip->ima_recovery_lock);
    /* Acquire IMA access forcibly from FG ALG */
    rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
            EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD,
            EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD, 1);
    if (rc) {
        pr_err("Error in writing to IMA_CFG, rc=%d\n", rc);
        goto out;
    }

    /* Release the IMA access now so that FG reset can go through */
    rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
            EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD, 0, 1);
    if (rc) {
        pr_err("Error in writing to IMA_CFG, rc=%d\n", rc);
        goto out;
    }

    if (fg_debug_mask & FG_STATUS)
        pr_info("resetting FG\n");

    /* Assert FG reset */
    rc = fg_reset(chip, true);
    if (rc) {
        pr_err("Couldn't reset FG\n");
        goto out;
    }

    /* Wait for a small time before deasserting FG reset */
    msleep(100);

    if (fg_debug_mask & FG_STATUS)
        pr_info("clearing FG from reset\n");

    /* Deassert FG reset */
    rc = fg_reset(chip, false);
    if (rc) {
        pr_err("Couldn't clear FG reset\n");
        goto out;
    }

    /* Wait for at least a FG cycle before doing SRAM access */
    msleep(2000);

    chip->block_sram_access = false;

    /* first-boot path: let init_work redo the full bring-up instead */
    if (!chip->init_done) {
        schedule_work(&chip->init_work);
        goto wait;
    }

    if (fg_debug_mask & FG_STATUS)
        pr_info("Calling hw_init\n");

    /*
     * Once FG is reset, everything in SRAM will be wiped out. Redo
     * hw_init, update jeita settings etc., again to make sure all
     * the settings got restored again.
     */
    rc = fg_hw_init(chip);
    if (rc) {
        pr_err("Error in hw_init, rc=%d\n", rc);
        goto out;
    }

    update_jeita_setting(&chip->update_jeita_setting.work);

    if (chip->wa_flag & IADC_GAIN_COMP_WA) {
        rc = fg_init_iadc_config(chip);
        if (rc)
            goto out;
    }

    if (fg_debug_mask & FG_STATUS)
        pr_info("loading battery profile\n");
    /* fake a battery re-insertion so the profile gets reloaded */
    if (!chip->use_otp_profile) {
        chip->battery_missing = true;
        chip->profile_loaded = false;
        chip->soc_reporting_ready = false;
        chip->batt_type = default_batt_type;
        fg_handle_battery_insertion(chip);
    }

wait:
    rc = wait_for_completion_interruptible_timeout(&chip->fg_reset_done,
            msecs_to_jiffies(FG_RESTART_TIMEOUT_MS));

    /* If we were interrupted wait again one more time. */
    if (rc == -ERESTARTSYS && !tried_again) {
        tried_again = true;
        pr_debug("interrupted, waiting again\n");
        goto wait;
    } else if (rc <= 0) {
        pr_err("fg_restart taking long time rc=%d\n", rc);
        goto out;
    }

    /* clear the stale Vint error estimate after the reset */
    rc = fg_mem_write(chip, buf, fg_data[FG_DATA_VINT_ERR].address,
            fg_data[FG_DATA_VINT_ERR].len,
            fg_data[FG_DATA_VINT_ERR].offset, 0);
    if (rc < 0)
        pr_err("Error in clearing VACT_INT_ERR, rc=%d\n", rc);

    if (fg_debug_mask & FG_STATUS)
        pr_info("IMA error recovery done...\n");
out:
    /* restore state and re-arm periodic work regardless of success */
    fg_restore_soc(chip);
    fg_restore_cc_soc(chip);
    fg_enable_irqs(chip, true);
    update_sram_data_work(&chip->update_sram_data.work);
    update_temp_data(&chip->update_temp_work.work);
    schedule_delayed_work(&chip->check_sanity_work,
        msecs_to_jiffies(1000));
    chip->ima_error_handling = false;
    mutex_unlock(&chip->ima_recovery_lock);
    fg_relax(&chip->fg_reset_wakeup_source);
}
  8946.  
  8947. #define DIG_MINOR       0x0
  8948. #define DIG_MAJOR       0x1
  8949. #define ANA_MINOR       0x2
  8950. #define ANA_MAJOR       0x3
  8951. #define IACS_INTR_SRC_SLCT  BIT(3)
  8952. static int fg_memif_init(struct fg_chip *chip)
  8953. {
  8954.     int rc;
  8955.  
  8956.     rc = fg_read(chip, chip->revision, chip->mem_base + DIG_MINOR, 4);
  8957.     if (rc) {
  8958.         pr_err("Unable to read FG revision rc=%d\n", rc);
  8959.         return rc;
  8960.     }
  8961.  
  8962.     switch (chip->revision[DIG_MAJOR]) {
  8963.     case DIG_REV_8994_1:
  8964.     case DIG_REV_8994_2:
  8965.         chip->offset = offset[0].address;
  8966.         break;
  8967.     case DIG_REV_8950_3:
  8968.         chip->offset = offset[1].address;
  8969.         chip->ima_supported = true;
  8970.         break;
  8971.     default:
  8972.         pr_err("Digital Major rev=%d not supported\n",
  8973.                     chip->revision[DIG_MAJOR]);
  8974.         return -EINVAL;
  8975.     }
  8976.  
  8977.     if (chip->ima_supported) {
  8978.         /*
  8979.          * Change the FG_MEM_INT interrupt to track IACS_READY
  8980.          * condition instead of end-of-transaction. This makes sure
  8981.          * that the next transaction starts only after the hw is ready.
  8982.          */
  8983.         rc = fg_masked_write(chip,
  8984.             chip->mem_base + MEM_INTF_IMA_CFG, IACS_INTR_SRC_SLCT,
  8985.             IACS_INTR_SRC_SLCT, 1);
  8986.         if (rc) {
  8987.             pr_err("failed to configure interrupt source %d\n", rc);
  8988.             return rc;
  8989.         }
  8990.  
  8991.         /* check for error condition */
  8992.         rc = fg_check_ima_exception(chip, true);
  8993.         if (rc && rc != -EAGAIN) {
  8994.             pr_err("Error in clearing IMA exception rc=%d", rc);
  8995.             return rc;
  8996.         }
  8997.     }
  8998.  
  8999.     return 0;
  9000. }
  9001.  
  9002. static int fg_detect_pmic_type(struct fg_chip *chip)
  9003. {
  9004.     struct pmic_revid_data *pmic_rev_id;
  9005.     struct device_node *revid_dev_node;
  9006.  
  9007.     revid_dev_node = of_parse_phandle(chip->spmi->dev.of_node,
  9008.                     "qcom,pmic-revid", 0);
  9009.     if (!revid_dev_node) {
  9010.         pr_err("Missing qcom,pmic-revid property - driver failed\n");
  9011.         return -EINVAL;
  9012.     }
  9013.  
  9014.     pmic_rev_id = get_revid_data(revid_dev_node);
  9015.     if (IS_ERR_OR_NULL(pmic_rev_id)) {
  9016.         pr_err("Unable to get pmic_revid rc=%ld\n",
  9017.                 PTR_ERR(pmic_rev_id));
  9018.         /*
  9019.          * the revid peripheral must be registered, any failure
  9020.          * here only indicates that the rev-id module has not
  9021.          * probed yet.
  9022.          */
  9023.         return -EPROBE_DEFER;
  9024.     }
  9025.  
  9026.     switch (pmic_rev_id->pmic_subtype) {
  9027.     case PMI8994:
  9028.     case PMI8950:
  9029.     case PMI8937:
  9030.     case PMI8996:
  9031.     case PMI8940:
  9032.         chip->pmic_subtype = pmic_rev_id->pmic_subtype;
  9033.         chip->pmic_revision[REVID_RESERVED] = pmic_rev_id->rev1;
  9034.         chip->pmic_revision[REVID_VARIANT]  = pmic_rev_id->rev2;
  9035.         chip->pmic_revision[REVID_ANA_MAJOR]    = pmic_rev_id->rev3;
  9036.         chip->pmic_revision[REVID_DIG_MAJOR]    = pmic_rev_id->rev4;
  9037.         break;
  9038.     default:
  9039.         pr_err("PMIC subtype %d not supported\n",
  9040.                 pmic_rev_id->pmic_subtype);
  9041.         return -EINVAL;
  9042.     }
  9043.  
  9044.     return 0;
  9045. }
  9046.  
#define INIT_JEITA_DELAY_MS 1000
/*
 * delayed_init_work() - deferred portion of FG probe.
 *
 * Runs once from chip->init_work (scheduled at the end of fg_probe()).
 * Performs the SRAM-backed hardware init under the FG memory lock, then
 * kicks off the periodic JEITA, SRAM and temperature update works, the
 * battery profile load and (when IMA is supported) the sanity checker.
 * On unrecoverable init failure the driver tears itself down via
 * fg_cleanup().
 */
static void delayed_init_work(struct work_struct *work)
{
	int rc;
	struct fg_chip *chip = container_of(work,
				struct fg_chip,
				init_work);

	/* hold memory access until initialization finishes */
	fg_mem_lock(chip);

	rc = fg_hw_init(chip);
	if (rc) {
		pr_err("failed to hw init rc = %d\n", rc);
		/*
		 * First-time init failure on an IMA part may just mean the
		 * FG algorithm is still busy; check and bail without
		 * cleaning up so a later retry can succeed.
		 */
		if (!chip->init_done && chip->ima_supported) {
			rc = fg_check_alg_status(chip);
			if (rc && rc != -EBUSY)
				pr_err("Couldn't check FG ALG status, rc=%d\n",
					rc);
			fg_mem_release(chip);
			return;
		}
		fg_mem_release(chip);
		fg_cleanup(chip);
		return;
	}
	/* release memory access before update_sram_data is called */
	fg_mem_release(chip);

	schedule_delayed_work(
		&chip->update_jeita_setting,
		msecs_to_jiffies(INIT_JEITA_DELAY_MS));

	/* Prime the periodic updaters only if they have never run */
	if (chip->last_sram_update_time == 0)
		update_sram_data_work(&chip->update_sram_data.work);

	if (chip->last_temp_update_time == 0)
		update_temp_data(&chip->update_temp_work.work);

	if (!chip->use_otp_profile)
		schedule_delayed_work(&chip->batt_profile_init, 0);

	/* Sanity checker guards against FG lockups on IMA parts */
	if (chip->ima_supported && fg_reset_on_lockup)
		schedule_delayed_work(&chip->check_sanity_work,
			msecs_to_jiffies(1000));

	if (chip->wa_flag & IADC_GAIN_COMP_WA) {
		rc = fg_init_iadc_config(chip);
		if (rc)
			goto done;
	}

	chip->input_present = is_input_present(chip);
	chip->otg_present = is_otg_present(chip);
	chip->init_done = true;
	pr_debug("FG: HW_init success\n");

	return;
done:
	fg_cleanup(chip);
}
  9108.  
  9109. static int fg_reboot_handler(struct notifier_block *nb,
  9110.              unsigned long event, void *unused)
  9111. {
  9112.     struct fg_chip *chip =
  9113.             container_of(nb, struct fg_chip, fg_reboot);
  9114.     int rc;
  9115.  
  9116.     dev_dbg(chip->dev, "FG Reboot\n");
  9117.     if (!chip) {
  9118.         dev_warn(chip->dev, "called before chip valid!\n");
  9119.         return NOTIFY_DONE;
  9120.     }
  9121.  
  9122.     switch (event) {
  9123.     case SYS_POWER_OFF:
  9124.         chip->shutdown_in_process = true;
  9125.         dev_warn(chip->dev, "Assert SRAM to Stop RBIAS!\n");
  9126.         rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
  9127.                      RIF_MEM_ACCESS_REQ,
  9128.                      RIF_MEM_ACCESS_REQ, 1);
  9129.         if (rc)
  9130.             pr_err("failed to set mem access bit\n");
  9131.         msleep(2000);
  9132.         break;
  9133.     default:
  9134.         break;
  9135.     }
  9136.  
  9137.     return NOTIFY_DONE;
  9138. }
  9139.  
  9140. static int fg_probe(struct spmi_device *spmi)
  9141. {
  9142.     struct device *dev = &(spmi->dev);
  9143.     struct fg_chip *chip;
  9144.     struct spmi_resource *spmi_resource;
  9145.     struct resource *resource;
  9146.     u8 subtype, reg;
  9147.     int rc = 0;
  9148.  
  9149.     if (!spmi) {
  9150.         pr_err("no valid spmi pointer\n");
  9151.         return -ENODEV;
  9152.     }
  9153.  
  9154.     if (!spmi->dev.of_node) {
  9155.         pr_err("device node missing\n");
  9156.         return -ENODEV;
  9157.     }
  9158.  
  9159.     chip = devm_kzalloc(dev, sizeof(struct fg_chip), GFP_KERNEL);
  9160.     if (chip == NULL) {
  9161.         pr_err("Can't allocate fg_chip\n");
  9162.         return -ENOMEM;
  9163.     }
  9164.  
  9165.     chip->spmi = spmi;
  9166.     chip->dev = &(spmi->dev);
  9167.  
  9168.     rc = device_create_file(chip->dev, &dev_attr_cycle_counts_bins);
  9169.     if (rc != 0)
  9170.         dev_err(chip->dev,
  9171.             "Failed to create cycle_counts_bins files: %d\n", rc);
  9172.  
  9173.     wakeup_source_init(&chip->empty_check_wakeup_source.source,
  9174.             "qpnp_fg_empty_check");
  9175.     wakeup_source_init(&chip->memif_wakeup_source.source,
  9176.             "qpnp_fg_memaccess");
  9177.     wakeup_source_init(&chip->profile_wakeup_source.source,
  9178.             "qpnp_fg_profile");
  9179.     wakeup_source_init(&chip->update_temp_wakeup_source.source,
  9180.             "qpnp_fg_update_temp");
  9181.     wakeup_source_init(&chip->update_sram_wakeup_source.source,
  9182.             "qpnp_fg_update_sram");
  9183.     wakeup_source_init(&chip->resume_soc_wakeup_source.source,
  9184.             "qpnp_fg_set_resume_soc");
  9185.     wakeup_source_init(&chip->gain_comp_wakeup_source.source,
  9186.             "qpnp_fg_gain_comp");
  9187.     wakeup_source_init(&chip->capacity_learning_wakeup_source.source,
  9188.             "qpnp_fg_cap_learning");
  9189.     wakeup_source_init(&chip->esr_extract_wakeup_source.source,
  9190.             "qpnp_fg_esr_extract");
  9191.     wakeup_source_init(&chip->slope_limit_wakeup_source.source,
  9192.             "qpnp_fg_slope_limit");
  9193.     wakeup_source_init(&chip->dischg_gain_wakeup_source.source,
  9194.             "qpnp_fg_dischg_gain");
  9195.     wakeup_source_init(&chip->fg_reset_wakeup_source.source,
  9196.             "qpnp_fg_reset");
  9197.     wakeup_source_init(&chip->cc_soc_wakeup_source.source,
  9198.             "qpnp_fg_cc_soc");
  9199.     wakeup_source_init(&chip->sanity_wakeup_source.source,
  9200.             "qpnp_fg_sanity_check");
  9201.     spin_lock_init(&chip->sec_access_lock);
  9202.     mutex_init(&chip->rw_lock);
  9203.     mutex_init(&chip->cyc_ctr.lock);
  9204.     mutex_init(&chip->learning_data.learning_lock);
  9205.     mutex_init(&chip->rslow_comp.lock);
  9206.     mutex_init(&chip->sysfs_restart_lock);
  9207.     mutex_init(&chip->ima_recovery_lock);
  9208.     INIT_DELAYED_WORK(&chip->update_jeita_setting, update_jeita_setting);
  9209.     INIT_DELAYED_WORK(&chip->update_sram_data, update_sram_data_work);
  9210.     INIT_DELAYED_WORK(&chip->update_temp_work, update_temp_data);
  9211.     INIT_DELAYED_WORK(&chip->check_empty_work, check_empty_work);
  9212.     INIT_DELAYED_WORK(&chip->batt_profile_init, batt_profile_init);
  9213.     INIT_DELAYED_WORK(&chip->check_sanity_work, check_sanity_work);
  9214.     INIT_WORK(&chip->ima_error_recovery_work, ima_error_recovery_work);
  9215.     INIT_WORK(&chip->rslow_comp_work, rslow_comp_work);
  9216.     INIT_WORK(&chip->fg_cap_learning_work, fg_cap_learning_work);
  9217.     INIT_WORK(&chip->dump_sram, dump_sram);
  9218.     INIT_WORK(&chip->status_change_work, status_change_work);
  9219.     INIT_WORK(&chip->cycle_count_work, update_cycle_count);
  9220.     INIT_WORK(&chip->battery_age_work, battery_age_work);
  9221.     INIT_WORK(&chip->update_esr_work, update_esr_value);
  9222.     INIT_WORK(&chip->set_resume_soc_work, set_resume_soc_work);
  9223.     INIT_WORK(&chip->sysfs_restart_work, sysfs_restart_work);
  9224.     INIT_WORK(&chip->init_work, delayed_init_work);
  9225.     INIT_WORK(&chip->charge_full_work, charge_full_work);
  9226.     INIT_WORK(&chip->gain_comp_work, iadc_gain_comp_work);
  9227.     INIT_WORK(&chip->bcl_hi_power_work, bcl_hi_power_work);
  9228.     INIT_WORK(&chip->esr_extract_config_work, esr_extract_config_work);
  9229.     INIT_WORK(&chip->slope_limiter_work, slope_limiter_work);
  9230.     INIT_WORK(&chip->dischg_gain_work, discharge_gain_work);
  9231.     INIT_WORK(&chip->cc_soc_store_work, cc_soc_store_work);
  9232.     alarm_init(&chip->fg_cap_learning_alarm, ALARM_BOOTTIME,
  9233.             fg_cap_learning_alarm_cb);
  9234.     alarm_init(&chip->hard_jeita_alarm, ALARM_BOOTTIME,
  9235.             fg_hard_jeita_alarm_cb);
  9236.     init_completion(&chip->sram_access_granted);
  9237.     init_completion(&chip->sram_access_revoked);
  9238.     init_completion(&chip->fg_sram_updating_done);
  9239.     complete_all(&chip->sram_access_revoked);
  9240.     init_completion(&chip->batt_id_avail);
  9241.     init_completion(&chip->first_soc_done);
  9242.     init_completion(&chip->fg_reset_done);
  9243.     dev_set_drvdata(&spmi->dev, chip);
  9244.  
  9245.     spmi_for_each_container_dev(spmi_resource, spmi) {
  9246.         if (!spmi_resource) {
  9247.             pr_err("qpnp_chg: spmi resource absent\n");
  9248.             rc = -ENXIO;
  9249.             goto of_init_fail;
  9250.         }
  9251.  
  9252.         resource = spmi_get_resource(spmi, spmi_resource,
  9253.                         IORESOURCE_MEM, 0);
  9254.         if (!(resource && resource->start)) {
  9255.             pr_err("node %s IO resource absent!\n",
  9256.                 spmi->dev.of_node->full_name);
  9257.             rc = -ENXIO;
  9258.             goto of_init_fail;
  9259.         }
  9260.  
  9261.         if (strcmp("qcom,fg-adc-vbat",
  9262.                     spmi_resource->of_node->name) == 0) {
  9263.             chip->vbat_adc_addr = resource->start;
  9264.             continue;
  9265.         } else if (strcmp("qcom,fg-adc-ibat",
  9266.                     spmi_resource->of_node->name) == 0) {
  9267.             chip->ibat_adc_addr = resource->start;
  9268.             continue;
  9269.         } else if (strcmp("qcom,revid-tp-rev",
  9270.                     spmi_resource->of_node->name) == 0) {
  9271.             chip->tp_rev_addr = resource->start;
  9272.             continue;
  9273.         }
  9274.  
  9275.         rc = fg_read(chip, &subtype,
  9276.                 resource->start + REG_OFFSET_PERP_SUBTYPE, 1);
  9277.         if (rc) {
  9278.             pr_err("Peripheral subtype read failed rc=%d\n", rc);
  9279.             goto of_init_fail;
  9280.         }
  9281.  
  9282.         switch (subtype) {
  9283.         case FG_SOC:
  9284.             chip->soc_base = resource->start;
  9285.             break;
  9286.         case FG_MEMIF:
  9287.             chip->mem_base = resource->start;
  9288.             break;
  9289.         case FG_BATT:
  9290.             chip->batt_base = resource->start;
  9291.             break;
  9292.         default:
  9293.             pr_err("Invalid peripheral subtype=0x%x\n", subtype);
  9294.             rc = -EINVAL;
  9295.         }
  9296.     }
  9297.  
  9298.     chip->shutdown_in_process = false;
  9299.  
  9300.     rc = fg_detect_pmic_type(chip);
  9301.     if (rc) {
  9302.         pr_err("Unable to detect PMIC type rc=%d\n", rc);
  9303.         return rc;
  9304.     }
  9305.  
  9306.     rc = fg_memif_init(chip);
  9307.     if (rc) {
  9308.         pr_err("Unable to setup mem_if offsets rc=%d\n", rc);
  9309.         goto of_init_fail;
  9310.     }
  9311.  
  9312.     rc = fg_of_init(chip);
  9313.     if (rc) {
  9314.         pr_err("failed to parse devicetree rc%d\n", rc);
  9315.         goto of_init_fail;
  9316.     }
  9317.  
  9318.     if (chip->jeita_hysteresis_support) {
  9319.         rc = fg_init_batt_temp_state(chip);
  9320.         if (rc) {
  9321.             pr_err("failed to get battery status rc%d\n", rc);
  9322.             goto of_init_fail;
  9323.         }
  9324.     }
  9325.  
  9326.     /* check if the first estimate is already finished at this time */
  9327.     if (is_first_est_done(chip))
  9328.         complete_all(&chip->first_soc_done);
  9329.  
  9330.     reg = 0xFF;
  9331.     rc = fg_write(chip, &reg, INT_EN_CLR(chip->mem_base), 1);
  9332.     if (rc) {
  9333.         pr_err("failed to clear interrupts %d\n", rc);
  9334.         goto of_init_fail;
  9335.     }
  9336.  
  9337.     rc = fg_init_irqs(chip);
  9338.     if (rc) {
  9339.         pr_err("failed to request interrupts %d\n", rc);
  9340.         goto cancel_work;
  9341.     }
  9342.  
  9343.     chip->batt_type = default_batt_type;
  9344.  
  9345.     chip->bms_psy.name = "bms";
  9346.  
  9347.     chip->bms_psy.type = POWER_SUPPLY_TYPE_BMS;
  9348.     chip->bms_psy.properties = fg_power_props;
  9349.     chip->bms_psy.num_properties = ARRAY_SIZE(fg_power_props);
  9350.     chip->bms_psy.get_property = fg_power_get_property;
  9351.     chip->bms_psy.set_property = fg_power_set_property;
  9352.     chip->bms_psy.external_power_changed = fg_external_power_changed;
  9353.     chip->bms_psy.supplied_to = fg_supplicants;
  9354.     chip->bms_psy.num_supplicants = ARRAY_SIZE(fg_supplicants);
  9355.     chip->bms_psy.property_is_writeable = fg_property_is_writeable;
  9356.  
  9357.     rc = power_supply_register(chip->dev, &chip->bms_psy);
  9358.     if (rc < 0) {
  9359.         pr_err("batt failed to register rc = %d\n", rc);
  9360.         goto of_init_fail;
  9361.     }
  9362.     chip->power_supply_registered = true;
  9363.     /*
  9364.      * Just initialize the batt_psy_name here. Power supply
  9365.      * will be obtained later.
  9366.      */
  9367.     chip->batt_psy_name = "battery";
  9368.  
  9369.     if (chip->mem_base) {
  9370.         rc = fg_dfs_create(chip);
  9371.         if (rc < 0) {
  9372.             pr_err("failed to create debugfs rc = %d\n", rc);
  9373.             goto power_supply_unregister;
  9374.         }
  9375.     }
  9376.  
  9377.     /* Fake temperature till the actual temperature is read */
  9378.     chip->last_good_temp = 250;
  9379.  
  9380.     /* Initialize batt_info variables */
  9381.     chip->batt_range_ocv = &fg_batt_valid_ocv;
  9382.     chip->batt_range_pct = &fg_batt_range_pct;
  9383.     memset(chip->batt_info, INT_MAX, sizeof(chip->batt_info));
  9384.  
  9385.     chip->fg_reboot.notifier_call = fg_reboot_handler;
  9386.     chip->fg_reboot.next = NULL;
  9387.     chip->fg_reboot.priority = 1;
  9388.     rc = register_reboot_notifier(&chip->fg_reboot);
  9389.     if (rc)
  9390.         dev_err(chip->dev, "register for reboot failed\n");
  9391.  
  9392.     schedule_work(&chip->init_work);
  9393.  
  9394.     pr_info("FG Probe success - FG Revision DIG:%d.%d ANA:%d.%d PMIC subtype=%d\n",
  9395.         chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR],
  9396.         chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR],
  9397.         chip->pmic_subtype);
  9398.  
  9399.     return rc;
  9400.  
  9401. power_supply_unregister:
  9402.     power_supply_unregister(&chip->bms_psy);
  9403. cancel_work:
  9404.     fg_cancel_all_works(chip);
  9405. of_init_fail:
  9406.     mutex_destroy(&chip->rslow_comp.lock);
  9407.     mutex_destroy(&chip->rw_lock);
  9408.     mutex_destroy(&chip->cyc_ctr.lock);
  9409.     mutex_destroy(&chip->learning_data.learning_lock);
  9410.     mutex_destroy(&chip->sysfs_restart_lock);
  9411.     mutex_destroy(&chip->ima_recovery_lock);
  9412.     wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
  9413.     wakeup_source_trash(&chip->empty_check_wakeup_source.source);
  9414.     wakeup_source_trash(&chip->memif_wakeup_source.source);
  9415.     wakeup_source_trash(&chip->profile_wakeup_source.source);
  9416.     wakeup_source_trash(&chip->update_temp_wakeup_source.source);
  9417.     wakeup_source_trash(&chip->update_sram_wakeup_source.source);
  9418.     wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
  9419.     wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
  9420.     wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
  9421.     wakeup_source_trash(&chip->slope_limit_wakeup_source.source);
  9422.     wakeup_source_trash(&chip->dischg_gain_wakeup_source.source);
  9423.     wakeup_source_trash(&chip->fg_reset_wakeup_source.source);
  9424.     wakeup_source_trash(&chip->cc_soc_wakeup_source.source);
  9425.     wakeup_source_trash(&chip->sanity_wakeup_source.source);
  9426.     return rc;
  9427. }
  9428.  
  9429. static void check_and_update_sram_data(struct fg_chip *chip)
  9430. {
  9431.     unsigned long current_time = 0, next_update_time, time_left;
  9432.  
  9433.     get_current_time(&current_time);
  9434.  
  9435.     next_update_time = chip->last_temp_update_time
  9436.         + (TEMP_PERIOD_UPDATE_MS / 1000);
  9437.  
  9438.     if (next_update_time > current_time)
  9439.         time_left = next_update_time - current_time;
  9440.     else
  9441.         time_left = 0;
  9442.  
  9443.     schedule_delayed_work(
  9444.         &chip->update_temp_work, msecs_to_jiffies(time_left * 1000));
  9445.  
  9446.     next_update_time = chip->last_sram_update_time
  9447.         + (fg_sram_update_period_ms / 1000);
  9448.  
  9449.     if (next_update_time > current_time)
  9450.         time_left = next_update_time - current_time;
  9451.     else
  9452.         time_left = 0;
  9453.  
  9454.     schedule_delayed_work(
  9455.         &chip->update_sram_data, msecs_to_jiffies(time_left * 1000));
  9456. }
  9457.  
  9458. static int fg_suspend(struct device *dev)
  9459. {
  9460.     struct fg_chip *chip = dev_get_drvdata(dev);
  9461.  
  9462.     if (!chip->sw_rbias_ctrl)
  9463.         return 0;
  9464.  
  9465.     cancel_delayed_work(&chip->update_temp_work);
  9466.     cancel_delayed_work(&chip->update_sram_data);
  9467.  
  9468.     return 0;
  9469. }
  9470.  
  9471. static int fg_resume(struct device *dev)
  9472. {
  9473.     struct fg_chip *chip = dev_get_drvdata(dev);
  9474.  
  9475.     if (!chip->sw_rbias_ctrl)
  9476.         return 0;
  9477.  
  9478.     check_and_update_sram_data(chip);
  9479.     return 0;
  9480. }
  9481.  
  9482. static void fg_check_ima_idle(struct fg_chip *chip)
  9483. {
  9484.     bool rif_mem_sts = true;
  9485.     int rc, time_count = 0;
  9486.  
  9487.     mutex_lock(&chip->rw_lock);
  9488.     /* Make sure IMA is idle */
  9489.     while (1) {
  9490.         rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
  9491.         if (rc)
  9492.             break;
  9493.  
  9494.         if (!rif_mem_sts)
  9495.             break;
  9496.  
  9497.         if  (time_count > 4) {
  9498.             pr_err("Waited for ~16ms polling RIF_MEM_ACCESS_REQ\n");
  9499.             fg_run_iacs_clear_sequence(chip);
  9500.             break;
  9501.         }
  9502.  
  9503.         /* Wait for 4ms before reading RIF_MEM_ACCESS_REQ again */
  9504.         usleep_range(4000, 4100);
  9505.         time_count++;
  9506.     }
  9507.     mutex_unlock(&chip->rw_lock);
  9508. }
  9509.  
/*
 * fg_shutdown() - SPMI shutdown hook.
 *
 * Clears any active rslow charge compensation, cancels all pending
 * works, waits for the IMA interface to go idle and flags the driver as
 * shut down so later register accesses are refused.
 */
static void fg_shutdown(struct spmi_device *spmi)
{
	struct fg_chip *chip = dev_get_drvdata(&spmi->dev);

	if (fg_debug_mask & FG_STATUS)
		pr_emerg("FG shutdown started\n");
	if (chip->rslow_comp.active)
		fg_rslow_charge_comp_clear(chip);
	fg_cancel_all_works(chip);
	fg_check_ima_idle(chip);
	/* blocks further FG access once set */
	chip->fg_shutdown = true;
	if (fg_debug_mask & FG_STATUS)
		pr_emerg("FG shutdown complete\n");
}
  9524.  
/* System suspend/resume callbacks (see fg_suspend()/fg_resume()) */
static const struct dev_pm_ops qpnp_fg_pm_ops = {
	.suspend	= fg_suspend,
	.resume		= fg_resume,
};
  9529.  
  9530. static int fg_reset_lockup_set(const char *val, const struct kernel_param *kp)
  9531. {
  9532.     int rc;
  9533.     struct power_supply *bms_psy;
  9534.     struct fg_chip *chip;
  9535.     int old_val = fg_reset_on_lockup;
  9536.  
  9537.     rc = param_set_int(val, kp);
  9538.     if (rc) {
  9539.         pr_err("Unable to set fg_reset_on_lockup: %d\n", rc);
  9540.         return rc;
  9541.     }
  9542.  
  9543.     if (fg_reset_on_lockup != 0 && fg_reset_on_lockup != 1) {
  9544.         pr_err("Bad value %d\n", fg_reset_on_lockup);
  9545.         fg_reset_on_lockup = old_val;
  9546.         return -EINVAL;
  9547.     }
  9548.  
  9549.     bms_psy = power_supply_get_by_name("bms");
  9550.     if (!bms_psy) {
  9551.         pr_err("bms psy not found\n");
  9552.         return 0;
  9553.     }
  9554.  
  9555.     chip = container_of(bms_psy, struct fg_chip, bms_psy);
  9556.     if (!chip->ima_supported) {
  9557.         pr_err("Cannot set this for non-IMA supported FG\n");
  9558.         fg_reset_on_lockup = old_val;
  9559.         return -EINVAL;
  9560.     }
  9561.  
  9562.     if (fg_debug_mask & FG_STATUS)
  9563.         pr_info("fg_reset_on_lockup set to %d\n", fg_reset_on_lockup);
  9564.  
  9565.     if (fg_reset_on_lockup)
  9566.         schedule_delayed_work(&chip->check_sanity_work,
  9567.             msecs_to_jiffies(1000));
  9568.     else
  9569.         cancel_delayed_work_sync(&chip->check_sanity_work);
  9570.  
  9571.     return rc;
  9572. }
  9573.  
  9574. static struct kernel_param_ops fg_reset_ops = {
  9575.     .set = fg_reset_lockup_set,
  9576.     .get = param_get_int,
  9577. };
  9578.  
  9579. module_param_cb(reset_on_lockup, &fg_reset_ops, &fg_reset_on_lockup, 0644);
  9580.  
  9581. static int fg_sense_type_set(const char *val, const struct kernel_param *kp)
  9582. {
  9583.     int rc;
  9584.     struct power_supply *bms_psy;
  9585.     struct fg_chip *chip;
  9586.     int old_fg_sense_type = fg_sense_type;
  9587.  
  9588.     rc = param_set_int(val, kp);
  9589.     if (rc) {
  9590.         pr_err("Unable to set fg_sense_type: %d\n", rc);
  9591.         return rc;
  9592.     }
  9593.  
  9594.     if (fg_sense_type != 0 && fg_sense_type != 1) {
  9595.         pr_err("Bad value %d\n", fg_sense_type);
  9596.         fg_sense_type = old_fg_sense_type;
  9597.         return -EINVAL;
  9598.     }
  9599.  
  9600.     if (fg_debug_mask & FG_STATUS)
  9601.         pr_info("fg_sense_type set to %d\n", fg_sense_type);
  9602.  
  9603.     bms_psy = power_supply_get_by_name("bms");
  9604.     if (!bms_psy) {
  9605.         pr_err("bms psy not found\n");
  9606.         return 0;
  9607.     }
  9608.  
  9609.     chip = container_of(bms_psy, struct fg_chip, bms_psy);
  9610.     rc = set_prop_sense_type(chip, fg_sense_type);
  9611.     return rc;
  9612. }
  9613.  
  9614. static struct kernel_param_ops fg_sense_type_ops = {
  9615.     .set = fg_sense_type_set,
  9616.     .get = param_get_int,
  9617. };
  9618.  
  9619. module_param_cb(sense_type, &fg_sense_type_ops, &fg_sense_type, 0644);
  9620.  
  9621. static int fg_restart_set(const char *val, const struct kernel_param *kp)
  9622. {
  9623.     struct power_supply *bms_psy;
  9624.     struct fg_chip *chip;
  9625.  
  9626.     bms_psy = power_supply_get_by_name("bms");
  9627.     if (!bms_psy) {
  9628.         pr_err("bms psy not found\n");
  9629.         return 0;
  9630.     }
  9631.     chip = container_of(bms_psy, struct fg_chip, bms_psy);
  9632.  
  9633.     mutex_lock(&chip->sysfs_restart_lock);
  9634.     if (fg_restart != 0) {
  9635.         mutex_unlock(&chip->sysfs_restart_lock);
  9636.         return 0;
  9637.     }
  9638.     fg_restart = 1;
  9639.     mutex_unlock(&chip->sysfs_restart_lock);
  9640.  
  9641.     if (fg_debug_mask & FG_STATUS)
  9642.         pr_info("fuel gauge restart initiated from sysfs...\n");
  9643.  
  9644.     schedule_work(&chip->sysfs_restart_work);
  9645.     return 0;
  9646. }
  9647.  
  9648. static struct kernel_param_ops fg_restart_ops = {
  9649.     .set = fg_restart_set,
  9650.     .get = param_get_int,
  9651. };
  9652.  
  9653. module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
  9654.  
/* SPMI driver registration: probe/remove/shutdown plus PM callbacks */
static struct spmi_driver fg_driver = {
	.driver		= {
		.name		= QPNP_FG_DEV_NAME,
		.of_match_table	= fg_match_table,
		.pm		= &qpnp_fg_pm_ops,
	},
	.probe		= fg_probe,
	.remove		= fg_remove,
	.shutdown	= fg_shutdown,
};
  9665.  
/* Module entry point: register the FG SPMI driver. */
static int __init fg_init(void)
{
	return spmi_driver_register(&fg_driver);
}
  9670.  
  9671. static void __exit fg_exit(void)
  9672. {
  9673.     return spmi_driver_unregister(&fg_driver);
  9674. }
  9675.  
module_init(fg_init);
module_exit(fg_exit);

MODULE_DESCRIPTION("QPNP Fuel Gauge Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" QPNP_FG_DEV_NAME);