Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 221 additions and 72 deletions
......@@ -4590,6 +4590,8 @@ void sev_es_vcpu_reset(struct vcpu_svm *svm)
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa)
{
struct kvm *kvm = svm->vcpu.kvm;
/*
* All host state for SEV-ES guests is categorized into three swap types
* based on how it is handled by hardware during a world switch:
......@@ -4613,14 +4615,22 @@ void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa)
/*
* If DebugSwap is enabled, debug registers are loaded but NOT saved by
* the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both
* saves and loads debug registers (Type-A).
* the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU does
* not save or load debug registers. Sadly, KVM can't prevent SNP
* guests from lying about DebugSwap on secondary vCPUs, i.e. the
* SEV_FEATURES provided at "AP Create" isn't guaranteed to match what
* the guest has actually enabled (or not!) in the VMSA.
*
* If DebugSwap is *possible*, save the masks so that they're restored
* if the guest enables DebugSwap. But for the DRs themselves, do NOT
* rely on the CPU to restore the host values; KVM will restore them as
* needed in common code, via hw_breakpoint_restore(). Note, KVM does
* NOT support virtualizing Breakpoint Extensions, i.e. the mask MSRs
* don't need to be restored per se, KVM just needs to ensure they are
* loaded with the correct values *if* the CPU writes the MSRs.
*/
if (sev_vcpu_has_debug_swap(svm)) {
hostsa->dr0 = native_get_debugreg(0);
hostsa->dr1 = native_get_debugreg(1);
hostsa->dr2 = native_get_debugreg(2);
hostsa->dr3 = native_get_debugreg(3);
if (sev_vcpu_has_debug_swap(svm) ||
(sev_snp_guest(kvm) && cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP))) {
hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0);
hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1);
hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
......
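The new condition is easier to read as "save the DR address masks whenever DebugSwap is *possible*", not merely when the VMSA KVM created says it is enabled. A minimal sketch of that predicate, using a hypothetical helper name that is not part of this series:

static bool sev_debug_swap_possible(struct vcpu_svm *svm)
{
	/* DebugSwap explicitly enabled in the VMSA KVM built for this vCPU... */
	if (sev_vcpu_has_debug_swap(svm))
		return true;

	/*
	 * ...or an SNP guest on DebugSwap-capable hardware, which may have
	 * enabled the feature in a VMSA of its own making without telling KVM.
	 */
	return sev_snp_guest(svm->vcpu.kvm) &&
	       cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP);
}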
......@@ -3165,6 +3165,27 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
break;
}
/*
* AMD changed the architectural behavior of bits 5:2. On CPUs
* without BusLockTrap, bits 5:2 control "external pins", but
* on CPUs that support BusLockDetect, bit 2 enables BusLockTrap
* and bits 5:3 are reserved-to-zero. Sadly, old KVM allowed
* the guest to set bits 5:2 despite not actually virtualizing
* Performance-Monitoring/Breakpoint external pins. Drop bits
* 5:2 for backwards compatibility.
*/
data &= ~GENMASK(5, 2);
/*
* Suppress BTF as KVM doesn't virtualize BTF, but there's no
* way to communicate lack of support to the guest.
*/
if (data & DEBUGCTLMSR_BTF) {
kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data);
data &= ~DEBUGCTLMSR_BTF;
}
if (data & DEBUGCTL_RESERVED_BITS)
return 1;
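Combined with the tightened DEBUGCTL_RESERVED_BITS definition in the svm.h hunk further down, the two filters above mean only the LBR enable bit can actually reach the VMCB. A hedged distillation of the write path (svm_filter_debugctl is a hypothetical helper, not a function in this series; DEBUGCTLMSR_LBR is bit 0, DEBUGCTLMSR_BTF is bit 1, GENMASK(5, 2) is 0x3c):

static int svm_filter_debugctl(struct kvm_vcpu *vcpu, u64 *data)
{
	*data &= ~GENMASK(5, 2);		/* legacy "external pins" / BusLockTrap bits */

	if (*data & DEBUGCTLMSR_BTF) {
		kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, *data);
		*data &= ~DEBUGCTLMSR_BTF;	/* BTF is not virtualized, silently drop it */
	}

	if (*data & DEBUGCTL_RESERVED_BITS)	/* anything other than DEBUGCTLMSR_LBR */
		return 1;			/* caller injects #GP */

	return 0;
}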
......@@ -4189,6 +4210,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
guest_state_enter_irqoff();
/*
* Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of
* VMRUN controls whether or not physical IRQs are masked (KVM always
* runs with V_INTR_MASKING_MASK). Toggle RFLAGS.IF here to avoid the
* temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow
* into guest state if delivery of an event during VMRUN triggers a
* #VMEXIT, and the guest_state transitions already tell lockdep that
* IRQs are being enabled/disabled. Note! GIF=0 for the entirety of
* this path, so IRQs aren't actually unmasked while running host code.
*/
raw_local_irq_enable();
amd_clear_divider();
if (sev_es_guest(vcpu->kvm))
......@@ -4197,6 +4230,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
else
__svm_vcpu_run(svm, spec_ctrl_intercepted);
raw_local_irq_disable();
guest_state_exit_irqoff();
}
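The reason the early raw_local_irq_enable() cannot let a host interrupt in: clgi() has already cleared GIF by the time svm_vcpu_enter_exit() runs. A simplified sketch of the window, using the calls from these hunks:

	clgi();				/* GIF=0: physical IRQs/NMIs held off           */
	...
	guest_state_enter_irqoff();
	raw_local_irq_enable();		/* IF=1, but still masked because GIF=0         */
	__svm_vcpu_run(svm, ...);	/* VMRUN samples host IF for V_INTR_MASKING     */
	raw_local_irq_disable();	/* IF=0 again before any host work              */
	guest_state_exit_irqoff();
	...
	stgi();				/* GIF=1: pending interrupts can now be taken   */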
......@@ -4253,6 +4288,16 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
clgi();
kvm_load_guest_xsave_state(vcpu);
/*
* Hardware only context switches DEBUGCTL if LBR virtualization is
* enabled. Manually load DEBUGCTL if necessary (and restore it after
* VM-Exit), as running with the host's DEBUGCTL can negatively affect
* guest state and can even be fatal, e.g. due to Bus Lock Detect.
*/
if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
update_debugctlmsr(svm->vmcb->save.dbgctl);
kvm_wait_lapic_expire(vcpu);
/*
......@@ -4280,6 +4325,10 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
update_debugctlmsr(vcpu->arch.host_debugctl);
kvm_load_host_xsave_state(vcpu);
stgi();
......
......@@ -584,7 +584,7 @@ static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
/* svm.c */
#define MSR_INVALID 0xffffffffU
#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
#define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR)
extern bool dump_invalid_vmcb;
......
......@@ -170,12 +170,8 @@ SYM_FUNC_START(__svm_vcpu_run)
mov VCPU_RDI(%_ASM_DI), %_ASM_DI
/* Enter guest mode */
sti
3: vmrun %_ASM_AX
4:
cli
/* Pop @svm to RAX while it's the only available register. */
pop %_ASM_AX
......@@ -340,12 +336,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
mov KVM_VMCB_pa(%rax), %rax
/* Enter guest mode */
sti
1: vmrun %rax
2: cli
2:
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
......
......@@ -1514,16 +1514,12 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
*/
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
shrink_ple_window(vcpu);
vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
vmx_vcpu_pi_load(vcpu, cpu);
vmx->host_debugctlmsr = get_debugctlmsr();
}
void vmx_vcpu_put(struct kvm_vcpu *vcpu)
......@@ -7458,8 +7454,8 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
}
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
if (vmx->host_debugctlmsr)
update_debugctlmsr(vmx->host_debugctlmsr);
if (vcpu->arch.host_debugctl)
update_debugctlmsr(vcpu->arch.host_debugctl);
#ifndef CONFIG_X86_64
/*
......
......@@ -340,8 +340,6 @@ struct vcpu_vmx {
/* apic deadline value in host tsc */
u64 hv_deadline_tsc;
unsigned long host_debugctlmsr;
/*
* Only bits masked by msr_ia32_feature_control_valid_bits can be set in
* msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
......
......@@ -10968,6 +10968,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(0, 7);
}
vcpu->arch.host_debugctl = get_debugctlmsr();
guest_timing_enter_irqoff();
for (;;) {
......
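Net effect of the svm.c, vmx.c, vmx.h, and x86.c hunks above: the host DEBUGCTL is snapshotted once in common code right before entry, and the VMX-private copy (vmx->host_debugctlmsr) goes away. A rough, non-literal sketch of the resulting flow:

	/* common x86, vcpu_enter_guest(): */
	vcpu->arch.host_debugctl = get_debugctlmsr();

	/* SVM: hardware only swaps DEBUGCTL when LBR virtualization is on,
	 * so load the guest value manually before VMRUN and put the host
	 * value back right after #VMEXIT. */
	if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
	    vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
		update_debugctlmsr(svm->vmcb->save.dbgctl);
	/* ... VMRUN ... */
	if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
	    vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
		update_debugctlmsr(vcpu->arch.host_debugctl);

	/* VMX: DEBUGCTL is zeroed by the CPU on VM-Exit, so restore the
	 * snapshot if it was non-zero. */
	if (vcpu->arch.host_debugctl)
		update_debugctlmsr(vcpu->arch.host_debugctl);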
......@@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out)
out[size] = 0;
while (i < size) {
u8 c = le16_to_cpu(in[i]) & 0xff;
u8 c = le16_to_cpu(in[i]) & 0x7f;
if (c && !isprint(c))
c = '!';
......
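Masking with 0x7f keeps only the low seven bits, so the converted partition name can never contain a byte with the top bit set; anything unprintable that remains is still replaced by '!'. A standalone userspace re-creation of the fixed helper, for demonstration only (le16_to_cpu dropped, so it assumes a little-endian host):

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

static void utf16_le_to_7bit(const uint16_t *in, unsigned int size, char *out)
{
	unsigned int i = 0;

	out[size] = 0;
	while (i < size) {
		char c = in[i] & 0x7f;	/* was & 0xff: the high bit could leak through */

		if (c && !isprint((unsigned char)c))
			c = '!';
		out[i] = c;
		i++;
	}
}

int main(void)
{
	const uint16_t name[] = { 'E', 'S', 'P', 0x00e9 };	/* trailing U+00E9 */
	char buf[5];

	utf16_le_to_7bit(name, 4, buf);
	printf("%s\n", buf);	/* 0xe9 & 0x7f == 0x69, so the output stays 7-bit ASCII */
	return 0;
}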
......@@ -21,9 +21,15 @@ struct platform_profile_handler {
struct device dev;
int minor;
unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
unsigned long hidden_choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
const struct platform_profile_ops *ops;
};
struct aggregate_choices_data {
unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
int count;
};
static const char * const profile_names[] = {
[PLATFORM_PROFILE_LOW_POWER] = "low-power",
[PLATFORM_PROFILE_COOL] = "cool",
......@@ -73,7 +79,7 @@ static int _store_class_profile(struct device *dev, void *data)
lockdep_assert_held(&profile_lock);
handler = to_pprof_handler(dev);
if (!test_bit(*bit, handler->choices))
if (!test_bit(*bit, handler->choices) && !test_bit(*bit, handler->hidden_choices))
return -EOPNOTSUPP;
return handler->ops->profile_set(dev, *bit);
......@@ -239,21 +245,44 @@ static const struct class platform_profile_class = {
/**
* _aggregate_choices - Aggregate the available profile choices
* @dev: The device
* @data: The available profile choices
* @arg: struct aggregate_choices_data
*
* Return: 0 on success, -errno on failure
*/
static int _aggregate_choices(struct device *dev, void *data)
static int _aggregate_choices(struct device *dev, void *arg)
{
unsigned long tmp[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
struct aggregate_choices_data *data = arg;
struct platform_profile_handler *handler;
unsigned long *aggregate = data;
lockdep_assert_held(&profile_lock);
handler = to_pprof_handler(dev);
if (test_bit(PLATFORM_PROFILE_LAST, aggregate))
bitmap_copy(aggregate, handler->choices, PLATFORM_PROFILE_LAST);
bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
if (test_bit(PLATFORM_PROFILE_LAST, data->aggregate))
bitmap_copy(data->aggregate, tmp, PLATFORM_PROFILE_LAST);
else
bitmap_and(aggregate, handler->choices, aggregate, PLATFORM_PROFILE_LAST);
bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
data->count++;
return 0;
}
/**
* _remove_hidden_choices - Remove hidden choices from aggregate data
* @dev: The device
* @arg: struct aggregate_choices_data
*
* Return: 0 on success, -errno on failure
*/
static int _remove_hidden_choices(struct device *dev, void *arg)
{
struct aggregate_choices_data *data = arg;
struct platform_profile_handler *handler;
lockdep_assert_held(&profile_lock);
handler = to_pprof_handler(dev);
bitmap_andnot(data->aggregate, handler->choices,
handler->hidden_choices, PLATFORM_PROFILE_LAST);
return 0;
}
......@@ -270,22 +299,31 @@ static ssize_t platform_profile_choices_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
struct aggregate_choices_data data = {
.aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
.count = 0,
};
int err;
set_bit(PLATFORM_PROFILE_LAST, aggregate);
set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
aggregate, _aggregate_choices);
&data, _aggregate_choices);
if (err)
return err;
if (data.count == 1) {
err = class_for_each_device(&platform_profile_class, NULL,
&data, _remove_hidden_choices);
if (err)
return err;
}
}
/* no profile handler registered any more */
if (bitmap_empty(aggregate, PLATFORM_PROFILE_LAST))
if (bitmap_empty(data.aggregate, PLATFORM_PROFILE_LAST))
return -EINVAL;
return _commmon_choices_show(aggregate, buf);
return _commmon_choices_show(data.aggregate, buf);
}
/**
......@@ -373,7 +411,10 @@ static ssize_t platform_profile_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
struct aggregate_choices_data data = {
.aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
.count = 0,
};
int ret;
int i;
......@@ -381,13 +422,13 @@ static ssize_t platform_profile_store(struct device *dev,
i = sysfs_match_string(profile_names, buf);
if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
set_bit(PLATFORM_PROFILE_LAST, choices);
set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
ret = class_for_each_device(&platform_profile_class, NULL,
choices, _aggregate_choices);
&data, _aggregate_choices);
if (ret)
return ret;
if (!test_bit(i, choices))
if (!test_bit(i, data.aggregate))
return -EOPNOTSUPP;
ret = class_for_each_device(&platform_profile_class, NULL, &i,
......@@ -453,12 +494,15 @@ EXPORT_SYMBOL_GPL(platform_profile_notify);
*/
int platform_profile_cycle(void)
{
struct aggregate_choices_data data = {
.aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
.count = 0,
};
enum platform_profile_option next = PLATFORM_PROFILE_LAST;
enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
int err;
set_bit(PLATFORM_PROFILE_LAST, choices);
set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
&profile, _aggregate_profiles);
......@@ -470,14 +514,14 @@ int platform_profile_cycle(void)
return -EINVAL;
err = class_for_each_device(&platform_profile_class, NULL,
choices, _aggregate_choices);
&data, _aggregate_choices);
if (err)
return err;
/* never iterate into a custom if all drivers supported it */
clear_bit(PLATFORM_PROFILE_CUSTOM, choices);
clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate);
next = find_next_bit_wrap(choices,
next = find_next_bit_wrap(data.aggregate,
PLATFORM_PROFILE_LAST,
profile + 1);
......@@ -532,6 +576,14 @@ struct device *platform_profile_register(struct device *dev, const char *name,
return ERR_PTR(-EINVAL);
}
if (ops->hidden_choices) {
err = ops->hidden_choices(drvdata, pprof->hidden_choices);
if (err) {
dev_err(dev, "platform_profile hidden_choices failed\n");
return ERR_PTR(err);
}
}
guard(mutex)(&profile_lock);
/* create class interface for individual handler */
......
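On the provider side, the new hook is invoked once at registration time as ops->hidden_choices(drvdata, pprof->hidden_choices). A hidden choice is accepted by platform_profile_store and still participates in aggregation across multiple handlers, but is filtered out of the advertised choices when its handler is the only one registered. A hypothetical driver fragment (the callback name and exact signature are assumptions inferred from that call site):

static int example_hidden_choices(void *drvdata, unsigned long *choices)
{
	/* Settable via sysfs, not advertised while this is the sole handler. */
	set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
	return 0;
}

static const struct platform_profile_ops example_ops = {
	.hidden_choices	= example_hidden_choices,
	/* .profile_get / .profile_set etc. as before */
};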
......@@ -274,6 +274,7 @@ static void binderfs_evict_inode(struct inode *inode)
mutex_unlock(&binderfs_minors_mutex);
if (refcount_dec_and_test(&device->ref)) {
hlist_del_init(&device->hlist);
kfree(device->context.name);
kfree(device);
}
......
......@@ -2079,6 +2079,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
out:
sup_handle->flags &= ~FWNODE_FLAG_VISITED;
put_device(sup_dev);
put_device(con_dev);
put_device(par_dev);
return ret;
}
......
......@@ -2715,9 +2715,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
/* parameters can only be changed when device isn't live */
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
if (test_bit(UB_STATE_USED, &ub->state)) {
/*
* Parameters can only be changed when device hasn't
* been started yet
*/
ret = -EACCES;
} else if (copy_from_user(&ub->params, argp, ph.len)) {
ret = -EFAULT;
......
......@@ -3644,6 +3644,7 @@ static ssize_t force_poll_sync_write(struct file *file,
}
static const struct file_operations force_poll_sync_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = force_poll_sync_read,
.write = force_poll_sync_write,
......
......@@ -1095,8 +1095,9 @@ static void mhi_pci_recovery_work(struct work_struct *work)
err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
if (pci_reset_function(pdev))
dev_err(&pdev->dev, "Recovery failed\n");
err = pci_try_reset_function(pdev);
if (err)
dev_err(&pdev->dev, "Recovery failed: %d\n", err);
}
static void health_check(struct timer_list *t)
......
......@@ -109,9 +109,29 @@ static int simple_pm_bus_runtime_resume(struct device *dev)
return 0;
}
static int simple_pm_bus_suspend(struct device *dev)
{
struct simple_pm_bus *bus = dev_get_drvdata(dev);
if (!bus)
return 0;
return pm_runtime_force_suspend(dev);
}
static int simple_pm_bus_resume(struct device *dev)
{
struct simple_pm_bus *bus = dev_get_drvdata(dev);
if (!bus)
return 0;
return pm_runtime_force_resume(dev);
}
static const struct dev_pm_ops simple_pm_bus_pm_ops = {
RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL)
NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
NOIRQ_SYSTEM_SLEEP_PM_OPS(simple_pm_bus_suspend, simple_pm_bus_resume)
};
#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
......
......@@ -473,8 +473,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cdx_device *cdx_dev = to_cdx_device(dev);
ssize_t len;
return sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
device_lock(dev);
len = sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
......
......@@ -264,8 +264,8 @@ int misc_register(struct miscdevice *misc)
device_create_with_groups(&misc_class, misc->parent, dev,
misc, misc->groups, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
misc_minor_free(misc->minor);
if (is_dynamic) {
misc_minor_free(misc->minor);
misc->minor = MISC_DYNAMIC_MINOR;
}
err = PTR_ERR(misc->this_device);
......
......@@ -923,14 +923,14 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
pipe_lock(pipe);
ret = 0;
if (pipe_empty(pipe->head, pipe->tail))
if (pipe_is_empty(pipe))
goto error_out;
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
goto error_out;
occupancy = pipe_occupancy(pipe->head, pipe->tail);
occupancy = pipe_buf_usage(pipe);
buf = alloc_buf(port->portdev->vdev, 0, occupancy);
if (!buf) {
......
......@@ -119,10 +119,15 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
struct platform_device *pdev;
int res, id;
if (!try_module_get(THIS_MODULE))
return -ENOENT;
/* kernfs guarantees string termination, so count + 1 is safe */
aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
if (!aggr)
return -ENOMEM;
if (!aggr) {
res = -ENOMEM;
goto put_module;
}
memcpy(aggr->args, buf, count + 1);
......@@ -161,6 +166,7 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
}
aggr->pdev = pdev;
module_put(THIS_MODULE);
return count;
remove_table:
......@@ -175,6 +181,8 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
kfree(aggr->lookups);
free_ga:
kfree(aggr);
put_module:
module_put(THIS_MODULE);
return res;
}
......@@ -203,13 +211,19 @@ static ssize_t delete_device_store(struct device_driver *driver,
if (error)
return error;
if (!try_module_get(THIS_MODULE))
return -ENOENT;
mutex_lock(&gpio_aggregator_lock);
aggr = idr_remove(&gpio_aggregator_idr, id);
mutex_unlock(&gpio_aggregator_lock);
if (!aggr)
if (!aggr) {
module_put(THIS_MODULE);
return -ENOENT;
}
gpio_aggregator_free(aggr);
module_put(THIS_MODULE);
return count;
}
static DRIVER_ATTR_WO(delete_device);
......
......@@ -40,7 +40,7 @@ struct gpio_rcar_info {
struct gpio_rcar_priv {
void __iomem *base;
spinlock_t lock;
raw_spinlock_t lock;
struct device *dev;
struct gpio_chip gpio_chip;
unsigned int irq_parent;
......@@ -123,7 +123,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
* "Setting Level-Sensitive Interrupt Input Mode"
*/
spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);
/* Configure positive or negative logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
......@@ -142,7 +142,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
if (!level_trigger)
gpio_rcar_write(p, INTCLR, BIT(hwirq));
spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
......@@ -246,7 +246,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
* "Setting General Input Mode"
*/
spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);
/* Configure positive logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, gpio, false);
......@@ -261,7 +261,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
if (p->info.has_outdtsel && output)
gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false);
spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
......@@ -347,7 +347,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
return 0;
}
spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);
outputs = gpio_rcar_read(p, INOUTSEL);
m = outputs & bankmask;
if (m)
......@@ -356,7 +356,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
m = ~outputs & bankmask;
if (m)
val |= gpio_rcar_read(p, INDT) & m;
spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);
bits[0] = val;
return 0;
......@@ -367,9 +367,9 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
unsigned long flags;
spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);
gpio_rcar_modify_bit(p, OUTDT, offset, value);
spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);
}
static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
......@@ -386,12 +386,12 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
if (!bankmask)
return;
spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);
val = gpio_rcar_read(p, OUTDT);
val &= ~bankmask;
val |= (bankmask & bits[0]);
gpio_rcar_write(p, OUTDT, val);
spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
......@@ -468,7 +468,12 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
p->info = *info;
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
*npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
if (ret) {
*npins = RCAR_MAX_GPIO_PER_BANK;
} else {
*npins = args.args[2];
of_node_put(args.np);
}
if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
dev_warn(p->dev, "Invalid number of gpio lines %u, using %u\n",
......@@ -505,7 +510,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
return -ENOMEM;
p->dev = dev;
spin_lock_init(&p->lock);
raw_spin_lock_init(&p->lock);
/* Get device configuration from DT node */
ret = gpio_rcar_parse_dt(p, &npins);
......