| text (stringlengths 213–7.14k) | idx (int64 16–12.5k) |
| --- | --- |
--- initial
+++ final
@@ -1,19 +1,18 @@
static int ds1742_rtc_set_time(struct device *dev, struct rtc_time *tm) {
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr_rtc;
u8 century;
century = bin2bcd((tm->tm_year + 1900) / 100);
writeb(RTC_WRITE, ioaddr + RTC_CONTROL);
writeb(bin2bcd(tm->tm_year % 100), ioaddr + RTC_YEAR);
writeb(bin2bcd(tm->tm_mon + 1), ioaddr + RTC_MONTH);
writeb(bin2bcd(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY);
writeb(bin2bcd(tm->tm_mday), ioaddr + RTC_DATE);
writeb(bin2bcd(tm->tm_hour), ioaddr + RTC_HOURS);
writeb(bin2bcd(tm->tm_min), ioaddr + RTC_MINUTES);
writeb(bin2bcd(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS);
/* RTC_CENTURY and RTC_CONTROL share same register */
writeb(RTC_WRITE | (century & RTC_CENTURY_MASK), ioaddr + RTC_CENTURY);
writeb(century & RTC_CENTURY_MASK, ioaddr + RTC_CONTROL);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 1,019 |
--- initial
+++ final
@@ -1,13 +1,12 @@
static int pcap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) {
- struct platform_device *pdev = to_platform_device(dev);
- struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
+ struct pcap_rtc *pcap_rtc = dev_get_drvdata(dev);
struct rtc_time *tm = &alrm->time;
unsigned long secs;
u32 tod, days;
rtc_tm_to_time(tm, &secs);
tod = secs % SEC_PER_DAY;
ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_TODA, tod);
days = secs / SEC_PER_DAY;
ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_DAYA, days);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 1,044 |
--- initial
+++ final
@@ -1,33 +1,32 @@
static int ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) {
- struct platform_device *pdev = to_platform_device(dev);
- struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
u8 seconds, minutes, hours, mday, ctrlb, ctrlc;
int ret;
/* Fetch the alarm info from the RTC alarm registers. */
ds1685_rtc_begin_data_access(rtc);
seconds = rtc->read(rtc, RTC_SECS_ALARM);
minutes = rtc->read(rtc, RTC_MINS_ALARM);
hours = rtc->read(rtc, RTC_HRS_ALARM);
mday = rtc->read(rtc, RTC_MDAY_ALARM);
ctrlb = rtc->read(rtc, RTC_CTRL_B);
ctrlc = rtc->read(rtc, RTC_CTRL_C);
ds1685_rtc_end_data_access(rtc);
/* Check the month date for validity. */
ret = ds1685_rtc_check_mday(rtc, mday);
if (ret) return ret;
/*
* Check the three alarm bytes.
*
* The Linux RTC system doesn't support the "don't care" capability
* of this RTC chip. We check for it anyways in case support is
* added in the future and only assign when we care.
*/
if (likely(seconds < 0xc0)) alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds, RTC_SECS_BCD_MASK, RTC_SECS_BIN_MASK);
if (likely(minutes < 0xc0)) alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes, RTC_MINS_BCD_MASK, RTC_MINS_BIN_MASK);
if (likely(hours < 0xc0)) alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours, RTC_HRS_24_BCD_MASK, RTC_HRS_24_BIN_MASK);
/* Write the data to rtc_wkalrm. */
alrm->time.tm_mday = ds1685_rtc_bcd2bin(rtc, mday, RTC_MDAY_BCD_MASK, RTC_MDAY_BIN_MASK);
alrm->enabled = !!(ctrlb & RTC_CTRL_B_AIE);
alrm->pending = !!(ctrlc & RTC_CTRL_C_AF);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 1,009 |
--- initial
+++ final
@@ -1,21 +1,20 @@
static time64_t get_alarm_or_time(struct device *dev, int time_alarm) {
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
u32 day = 0, hr = 0, min = 0, sec = 0, hr_min = 0;
switch (time_alarm) {
case MXC_RTC_TIME:
day = readw(ioaddr + RTC_DAYR);
hr_min = readw(ioaddr + RTC_HOURMIN);
sec = readw(ioaddr + RTC_SECOND);
break;
case MXC_RTC_ALARM:
day = readw(ioaddr + RTC_DAYALARM);
hr_min = readw(ioaddr + RTC_ALRM_HM) & 0xffff;
sec = readw(ioaddr + RTC_ALRM_SEC);
break;
}
hr = hr_min >> 8;
min = hr_min & 0xff;
return ((((time64_t)day * 24 + hr) * 60) + min) * 60 + sec;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,972 |
--- initial
+++ final
@@ -1,9 +1,8 @@
static void asoc_ssc_exit(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct ssc_device *ssc = platform_get_drvdata(pdev);
+ struct ssc_device *ssc = dev_get_drvdata(dev);
if (ssc->pdata->use_dma)
atmel_pcm_dma_platform_unregister(dev);
else
atmel_pcm_pdc_platform_unregister(dev);
snd_soc_unregister_component(dev);
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,034 |
--- initial
+++ final
@@ -1,8 +1,7 @@
static int dma40_runtime_suspend(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct d40_base *base = platform_get_drvdata(pdev);
+ struct d40_base *base = dev_get_drvdata(dev);
d40_save_restore_registers(base, true);
/* Don't disable/enable clocks for v1 due to HW bugs */
if (base->rev != 1) writel_relaxed(base->gcc_pwr_off_mask, base->virtbase + D40_DREG_GCC);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,048 |
--- initial
+++ final
@@ -1,29 +1,28 @@
static int ds1742_rtc_read_time(struct device *dev, struct rtc_time *tm) {
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr_rtc;
unsigned int year, month, day, hour, minute, second, week;
unsigned int century;
/* give enough time to update RTC in case of continuous read */
if (pdata->last_jiffies == jiffies) msleep(1);
pdata->last_jiffies = jiffies;
writeb(RTC_READ, ioaddr + RTC_CONTROL);
second = readb(ioaddr + RTC_SECONDS) & RTC_SECONDS_MASK;
minute = readb(ioaddr + RTC_MINUTES);
hour = readb(ioaddr + RTC_HOURS);
day = readb(ioaddr + RTC_DATE);
week = readb(ioaddr + RTC_DAY) & RTC_DAY_MASK;
month = readb(ioaddr + RTC_MONTH);
year = readb(ioaddr + RTC_YEAR);
century = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK;
writeb(0, ioaddr + RTC_CONTROL);
tm->tm_sec = bcd2bin(second);
tm->tm_min = bcd2bin(minute);
tm->tm_hour = bcd2bin(hour);
tm->tm_mday = bcd2bin(day);
tm->tm_wday = bcd2bin(week);
tm->tm_mon = bcd2bin(month) - 1;
/* year is 1900 + tm->tm_year */
tm->tm_year = bcd2bin(year) + bcd2bin(century) * 100 - 1900;
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,956 |
--- initial
+++ final
@@ -1,19 +1,18 @@
static ssize_t dsicm_hw_revision_show(struct device *dev, struct device_attribute *attr, char *buf) {
- struct platform_device *pdev = to_platform_device(dev);
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
struct omap_dss_device *in = ddata->in;
u8 id1, id2, id3;
int r;
mutex_lock(&ddata->lock);
if (ddata->enabled) {
in->ops.dsi->bus_lock(in);
r = dsicm_wake_up(ddata);
if (!r) r = dsicm_get_id(ddata, &id1, &id2, &id3);
in->ops.dsi->bus_unlock(in);
} else {
r = -ENODEV;
}
mutex_unlock(&ddata->lock);
if (r) return r;
return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,052 |
--- initial
+++ final
@@ -1,31 +1,30 @@
static int __maybe_unused auok190x_suspend(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct fb_info *info = platform_get_drvdata(pdev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct auok190xfb_par *par = info->par;
struct auok190x_board *board = par->board;
int ret;
dev_dbg(dev, "suspend\n");
if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
/* suspend via powering off the ic */
dev_dbg(dev, "suspend with broken standby\n");
auok190x_power(par, 0);
} else {
dev_dbg(dev, "suspend using sleep\n");
/* the sleep state can only be entered from the standby state.
* pm_runtime_get_noresume gets called before the suspend call.
* So the devices usage count is >0 but it is not necessarily
* active.
*/
if (!pm_runtime_status_suspended(dev)) {
ret = auok190x_runtime_suspend(dev);
if (ret < 0) {
dev_err(dev, "auok190x_runtime_suspend failed with %d\n", ret);
return ret;
}
par->manual_standby = 1;
}
gpio_direction_output(board->gpio_nsleep, 0);
}
msleep(100);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,061 |
--- initial
+++ final
@@ -1,12 +1,11 @@
static int pcap_rtc_read_time(struct device *dev, struct rtc_time *tm) {
- struct platform_device *pdev = to_platform_device(dev);
- struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
+ struct pcap_rtc *pcap_rtc = dev_get_drvdata(dev);
unsigned long secs;
u32 tod, days;
ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_TOD, &tod);
secs = tod & PCAP_RTC_TOD_MASK;
ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_DAY, &days);
secs += (days & PCAP_RTC_DAY_MASK) * SEC_PER_DAY;
rtc_time_to_tm(secs, tm);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,981 |
--- initial
+++ final
@@ -1,16 +1,15 @@
static int at_dma_suspend_noirq(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct at_dma *atdma = dev_get_drvdata(dev);
struct dma_chan *chan, *_chan;
/* preserve data */
list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, device_node) {
struct at_dma_chan *atchan = to_at_dma_chan(chan);
if (atc_chan_is_cyclic(atchan)) atc_suspend_cyclic(atchan);
atchan->save_cfg = channel_readl(atchan, CFG);
}
atdma->save_imr = dma_readl(atdma, EBCIMR);
/* disable DMA controller */
at_dma_off(atdma);
clk_disable_unprepare(atdma->clk);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,038 |
--- initial
+++ final
@@ -1,9 +1,8 @@
static int imx_uart_resume_noirq(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct imx_port *sport = platform_get_drvdata(pdev);
+ struct imx_port *sport = dev_get_drvdata(dev);
int ret;
ret = clk_enable(sport->clk_ipg);
if (ret) return ret;
imx_uart_restore_context(sport);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,010 |
--- initial
+++ final
@@ -1,40 +1,39 @@
static umode_t asus_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) {
struct device *dev = container_of(kobj, struct device, kobj);
- struct platform_device *pdev = to_platform_device(dev);
- struct asus_laptop *asus = platform_get_drvdata(pdev);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
acpi_handle handle = asus->handle;
bool supported;
if (asus->is_pega_lucid) {
/* no ls_level interface on the Lucid */
if (attr == &dev_attr_ls_switch.attr)
supported = true;
else if (attr == &dev_attr_ls_level.attr)
supported = false;
else
goto normal;
return supported ? attr->mode : 0;
}
normal:
if (attr == &dev_attr_wlan.attr) {
supported = !acpi_check_handle(handle, METHOD_WLAN, NULL);
} else if (attr == &dev_attr_bluetooth.attr) {
supported = !acpi_check_handle(handle, METHOD_BLUETOOTH, NULL);
} else if (attr == &dev_attr_display.attr) {
supported = !acpi_check_handle(handle, METHOD_SWITCH_DISPLAY, NULL);
} else if (attr == &dev_attr_wimax.attr) {
supported = !acpi_check_handle(asus->handle, METHOD_WIMAX, NULL);
} else if (attr == &dev_attr_wwan.attr) {
supported = !acpi_check_handle(asus->handle, METHOD_WWAN, NULL);
} else if (attr == &dev_attr_ledd.attr) {
supported = !acpi_check_handle(handle, METHOD_LEDD, NULL);
} else if (attr == &dev_attr_ls_switch.attr || attr == &dev_attr_ls_level.attr) {
supported = !acpi_check_handle(handle, METHOD_ALS_CONTROL, NULL) && !acpi_check_handle(handle, METHOD_ALS_LEVEL, NULL);
} else if (attr == &dev_attr_ls_value.attr) {
supported = asus->is_pega_lucid;
} else if (attr == &dev_attr_gps.attr) {
supported = !acpi_check_handle(handle, METHOD_GPS_ON, NULL) && !acpi_check_handle(handle, METHOD_GPS_OFF, NULL) && !acpi_check_handle(handle, METHOD_GPS_STATUS, NULL);
} else {
supported = true;
}
return supported ? attr->mode : 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,927 |
--- initial
+++ final
@@ -1,8 +1,7 @@
static int __maybe_unused cdns_runtime_suspend(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct uart_port *port = platform_get_drvdata(pdev);
+ struct uart_port *port = dev_get_drvdata(dev);
struct cdns_uart *cdns_uart = port->private_data;
clk_disable(cdns_uart->uartclk);
clk_disable(cdns_uart->pclk);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,019 |
--- initial
+++ final
@@ -1,7 +1,6 @@
static int ad7606_par8_read_block(struct device *dev, int count, void *buf) {
- struct platform_device *pdev = to_platform_device(dev);
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
insb((unsigned long)st->base_address, buf, count * 2);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,005 |
--- initial
+++ final
@@ -1,21 +1,20 @@
static int ds1216_rtc_read_time(struct device *dev, struct rtc_time *tm) {
- struct platform_device *pdev = to_platform_device(dev);
- struct ds1216_priv *priv = platform_get_drvdata(pdev);
+ struct ds1216_priv *priv = dev_get_drvdata(dev);
struct ds1216_regs regs;
ds1216_switch_ds_to_clock(priv->ioaddr);
ds1216_read(priv->ioaddr, (u8 *)&regs);
tm->tm_sec = bcd2bin(regs.sec);
tm->tm_min = bcd2bin(regs.min);
if (regs.hour & DS1216_HOUR_1224) {
/* AM/PM mode */
tm->tm_hour = bcd2bin(regs.hour & 0x1f);
if (regs.hour & DS1216_HOUR_AMPM) tm->tm_hour += 12;
} else
tm->tm_hour = bcd2bin(regs.hour & 0x3f);
tm->tm_wday = (regs.wday & 7) - 1;
tm->tm_mday = bcd2bin(regs.mday & 0x3f);
tm->tm_mon = bcd2bin(regs.month & 0x1f);
tm->tm_year = bcd2bin(regs.year);
if (tm->tm_year < 70) tm->tm_year += 100;
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,937 |
--- initial
+++ final
@@ -1,27 +1,26 @@
static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) {
- struct platform_device *pdev = to_platform_device(dev);
- struct sh_rtc *rtc = platform_get_drvdata(pdev);
+ struct sh_rtc *rtc = dev_get_drvdata(dev);
unsigned int rcr1;
struct rtc_time *tm = &wkalrm->time;
int mon;
spin_lock_irq(&rtc->lock);
/* disable alarm interrupt and clear the alarm flag */
rcr1 = readb(rtc->regbase + RCR1);
rcr1 &= ~(RCR1_AF | RCR1_AIE);
writeb(rcr1, rtc->regbase + RCR1);
/* set alarm time */
sh_rtc_write_alarm_value(rtc, tm->tm_sec, RSECAR);
sh_rtc_write_alarm_value(rtc, tm->tm_min, RMINAR);
sh_rtc_write_alarm_value(rtc, tm->tm_hour, RHRAR);
sh_rtc_write_alarm_value(rtc, tm->tm_wday, RWKAR);
sh_rtc_write_alarm_value(rtc, tm->tm_mday, RDAYAR);
mon = tm->tm_mon;
if (mon >= 0) mon += 1;
sh_rtc_write_alarm_value(rtc, mon, RMONAR);
if (wkalrm->enabled) {
rcr1 |= RCR1_AIE;
writeb(rcr1, rtc->regbase + RCR1);
}
spin_unlock_irq(&rtc->lock);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,986 |
--- initial
+++ final
@@ -1,17 +1,16 @@
static int fsldma_resume_early(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct fsldma_device *fdev = platform_get_drvdata(pdev);
+ struct fsldma_device *fdev = dev_get_drvdata(dev);
struct fsldma_chan *chan;
u32 mode;
int i;
for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
chan = fdev->chan[i];
if (!chan) continue;
spin_lock_bh(&chan->desc_lock);
mode = chan->regs_save.mr & ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
set_mr(chan, mode);
chan->pm_state = RUNNING;
spin_unlock_bh(&chan->desc_lock);
}
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,042 |
--- initial
+++ final
@@ -1,6 +1,5 @@
static int __maybe_unused hid_sensor_suspend(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct hid_sensor_common *attrb = iio_device_get_drvdata(indio_dev);
return _hid_sensor_power_state(attrb, false);
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,911 |
--- initial
+++ final
@@ -1,22 +1,21 @@
static int wdat_wdt_resume_noirq(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct wdat_wdt *wdat = platform_get_drvdata(pdev);
+ struct wdat_wdt *wdat = dev_get_drvdata(dev);
int ret;
if (!watchdog_active(&wdat->wdd)) return 0;
if (!wdat->stopped) {
/*
* Looks like the boot firmware reinitializes the watchdog
* before it hands off to the OS on resume from sleep so we
* stop and reprogram the watchdog here.
*/
ret = wdat_wdt_stop(&wdat->wdd);
if (ret) return ret;
ret = wdat_wdt_set_timeout(&wdat->wdd, wdat->wdd.timeout);
if (ret) return ret;
ret = wdat_wdt_enable_reboot(wdat);
if (ret) return ret;
ret = wdat_wdt_ping(&wdat->wdd);
if (ret) return ret;
}
return wdat_wdt_start(&wdat->wdd);
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,032 |
--- initial
+++ final
@@ -1,17 +1,16 @@
static int wmt_mci_resume(struct device *dev) {
u32 reg_tmp;
- struct platform_device *pdev = to_platform_device(dev);
- struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct mmc_host *mmc = dev_get_drvdata(dev);
struct wmt_mci_priv *priv;
if (mmc) {
priv = mmc_priv(mmc);
clk_enable(priv->clk_sdmmc);
reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE), priv->sdmmc_base + SDMMC_BLKLEN);
reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base + SDMMC_INTMASK0);
}
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,920 |
--- initial
+++ final
@@ -1,11 +1,10 @@
static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) {
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
if (pdata->irq < 0) return -EINVAL; /* fall back into rtc-dev's emulation */
if (enabled)
writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
else
writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,971 |
--- initial
+++ final
@@ -1,19 +1,18 @@
static int atmel_xdmac_suspend(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
+ struct at_xdmac *atxdmac = dev_get_drvdata(dev);
struct dma_chan *chan, *_chan;
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
if (at_xdmac_chan_is_cyclic(atchan)) {
if (!at_xdmac_chan_is_paused(atchan)) at_xdmac_device_pause(chan);
atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
}
}
atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
at_xdmac_off(atxdmac);
clk_disable_unprepare(atxdmac->clk);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,041 |
--- initial
+++ final
@@ -1,7 +1,6 @@
static ssize_t arm_spe_pmu_cap_show(struct device *dev, struct device_attribute *attr, char *buf) {
- struct platform_device *pdev = to_platform_device(dev);
- struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
+ struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
struct dev_ext_attribute *ea = container_of(attr, struct dev_ext_attribute, attr);
int cap = (long)ea->var;
return snprintf(buf, PAGE_SIZE, "%u\n", arm_spe_pmu_cap_get(spe_pmu, cap));
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,925 |
--- initial
+++ final
@@ -1,5 +1,4 @@
static ssize_t arm_spe_pmu_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) {
- struct platform_device *pdev = to_platform_device(dev);
- struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
+ struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,926 |
--- initial
+++ final
@@ -1,9 +1,8 @@
static ssize_t ds1685_rtc_sysfs_auxbatt_show(struct device *dev, struct device_attribute *attr, char *buf) {
- struct platform_device *pdev = to_platform_device(dev);
- struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
u8 ctrl4a;
ds1685_rtc_switch_to_bank1(rtc);
ctrl4a = rtc->read(rtc, RTC_EXT_CTRL_4A);
ds1685_rtc_switch_to_bank0(rtc);
return sprintf(buf, "%s\n", (ctrl4a & RTC_CTRL_4A_VRT2) ? "ok" : "not ok or N/A");
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,951 |
--- initial
+++ final
@@ -1,10 +1,9 @@
static int imx_uart_resume(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct imx_port *sport = platform_get_drvdata(pdev);
+ struct imx_port *sport = dev_get_drvdata(dev);
/* disable wakeup from i.MX UART */
imx_uart_enable_wakeup(sport, false);
uart_resume_port(&imx_uart_uart_driver, &sport->port);
enable_irq(sport->port.irq);
clk_disable_unprepare(sport->clk_ipg);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,009 |
--- initial
+++ final
@@ -1,7 +1,6 @@
static int sh_mobile_lcdc_runtime_suspend(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev);
+ struct sh_mobile_lcdc_priv *priv = dev_get_drvdata(dev);
/* turn off LCDC hardware */
lcdc_write(priv, _LDCNT1R, 0);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,064 |
--- initial
+++ final
@@ -1,19 +1,18 @@
static int vpfe_suspend(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct vpfe_device *vpfe = platform_get_drvdata(pdev);
+ struct vpfe_device *vpfe = dev_get_drvdata(dev);
struct vpfe_ccdc *ccdc = &vpfe->ccdc;
/* if streaming has not started we don't care */
if (!vb2_start_streaming_called(&vpfe->buffer_queue)) return 0;
pm_runtime_get_sync(dev);
vpfe_config_enable(ccdc, 1);
/* Save VPFE context */
vpfe_save_context(ccdc);
/* Disable CCDC */
vpfe_pcr_enable(ccdc, 0);
vpfe_config_enable(ccdc, 0);
/* Disable both master and slave clock */
pm_runtime_put_sync(dev);
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,915 |
--- initial
+++ final
@@ -1,5 +1,4 @@
static int __maybe_unused qcom_iommu_resume(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
+ struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
return qcom_iommu_enable_clocks(qcom_iommu);
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,912 |
--- initial
+++ final
@@ -1,7 +1,6 @@
static int __maybe_unused zynqmp_qspi_suspend(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
spi_master_suspend(master);
zynqmp_unprepare_transfer_hardware(master);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 11,000 |
--- initial
+++ final
@@ -1,17 +1,16 @@
static int vpfe_resume(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct vpfe_device *vpfe = platform_get_drvdata(pdev);
+ struct vpfe_device *vpfe = dev_get_drvdata(dev);
struct vpfe_ccdc *ccdc = &vpfe->ccdc;
/* if streaming has not started we don't care */
if (!vb2_start_streaming_called(&vpfe->buffer_queue)) return 0;
/* Enable both master and slave clock */
pm_runtime_get_sync(dev);
vpfe_config_enable(ccdc, 1);
/* Restore VPFE context */
vpfe_restore_context(ccdc);
vpfe_config_enable(ccdc, 0);
pm_runtime_put_sync(dev);
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,914 |
--- initial
+++ final
@@ -1,18 +1,17 @@
static int __maybe_unused cros_ec_sensors_prepare(struct device *dev) {
- struct platform_device *pdev = to_platform_device(dev);
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
if (st->curr_sampl_freq == 0) return 0;
/*
* If the sensors are sampled at high frequency, we will not be able to
* sleep. Set sampling to a long period if necessary.
*/
if (st->curr_sampl_freq < CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY) {
mutex_lock(&st->cmd_lock);
st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
st->param.ec_rate.data = CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY;
cros_ec_motion_send_host_cmd(st, 0);
mutex_unlock(&st->cmd_lock);
}
return 0;
}<sep>@@
identifier e1;
expression e2;
type T;
@@
(
- T e1 = to_platform_device(e2);
...
- platform_get_drvdata(e1)
+ dev_get_drvdata(e2)
... when any
&
T e1 = to_platform_device(e2);
<... when != e1
(
platform_get_drvdata(e1)
|
- &e1->dev
+ e2
)
...>
)
<|end_of_text|> | 10,908 |
--- initial
+++ final
@@ -1,26 +1,26 @@
static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) {
u32 usbcfg;
int retval = 0;
if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL || hsotg->params.speed == DWC2_SPEED_PARAM_LOW) && hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
/* If FS/LS mode with FS/LS PHY */
retval = dwc2_fs_phy_init(hsotg, select_phy);
if (retval) return retval;
} else {
/* High speed PHY */
retval = dwc2_hs_phy_init(hsotg, select_phy);
if (retval) return retval;
}
if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && hsotg->params.ulpi_fs_ls) {
dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg |= GUSBCFG_ULPI_FS_LS;
usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
} else {
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg &= ~GUSBCFG_ULPI_FS_LS;
usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
}
return retval;
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,531 |
--- initial
+++ final
@@ -1,14 +1,14 @@
static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg, unsigned int idx, int dir_in) {
u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
u32 ints;
u32 mask;
u32 diepempmsk;
- mask = dwc2_readl(hsotg->regs + epmsk_reg);
- diepempmsk = dwc2_readl(hsotg->regs + DIEPEMPMSK);
+ mask = dwc2_readl(hsotg, epmsk_reg);
+ diepempmsk = dwc2_readl(hsotg, DIEPEMPMSK);
mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
mask |= DXEPINT_SETUP_RCVD;
- ints = dwc2_readl(hsotg->regs + epint_reg);
+ ints = dwc2_readl(hsotg, epint_reg);
ints &= mask;
return ints;
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
<|end_of_text|> | 11,450 |
--- initial
+++ final
@@ -1,9 +1,9 @@
static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, int chnum, struct dwc2_qtd *qtd, enum dwc2_halt_status halt_status) {
- u32 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
+ u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
qtd->error_count = 0;
if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0) /* Core halts channel in these cases */
dwc2_release_channel(hsotg, chan, qtd, halt_status);
else
/* Flush any outstanding requests from the Tx queue */
dwc2_halt_channel(hsotg, chan, qtd, halt_status);
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
<|end_of_text|> | 11,538 |
--- initial
+++ final
@@ -1,54 +1,54 @@
static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, int chnum, struct dwc2_qtd *qtd) {
struct dwc2_hcd_urb *urb = qtd->urb;
char *pipetype, *speed;
u32 hcchar;
u32 hcsplt;
u32 hctsiz;
u32 hc_dma;
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n", chnum);
if (!urb) goto handle_ahberr_halt;
dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chnum));
- hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chnum));
- hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
- hc_dma = dwc2_readl(hsotg->regs + HCDMA(chnum));
+ hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
+ hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
+ hc_dma = dwc2_readl(hsotg, HCDMA(chnum));
dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
dev_err(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
dev_err(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
dev_err(hsotg->dev, " Device address: %d\n", dwc2_hcd_get_dev_addr(&urb->pipe_info));
dev_err(hsotg->dev, " Endpoint: %d, %s\n", dwc2_hcd_get_ep_num(&urb->pipe_info), dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
case USB_ENDPOINT_XFER_CONTROL: pipetype = "CONTROL"; break;
case USB_ENDPOINT_XFER_BULK: pipetype = "BULK"; break;
case USB_ENDPOINT_XFER_INT: pipetype = "INTERRUPT"; break;
case USB_ENDPOINT_XFER_ISOC: pipetype = "ISOCHRONOUS"; break;
default: pipetype = "UNKNOWN"; break;
}
dev_err(hsotg->dev, " Endpoint type: %s\n", pipetype);
switch (chan->speed) {
case USB_SPEED_HIGH: speed = "HIGH"; break;
case USB_SPEED_FULL: speed = "FULL"; break;
case USB_SPEED_LOW: speed = "LOW"; break;
default: speed = "UNKNOWN"; break;
}
dev_err(hsotg->dev, " Speed: %s\n", speed);
dev_err(hsotg->dev, " Max packet size: %d\n", dwc2_hcd_get_mps(&urb->pipe_info));
dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length);
dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n", urb->buf, (unsigned long)urb->dma);
dev_err(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n", urb->setup_packet, (unsigned long)urb->setup_dma);
dev_err(hsotg->dev, " Interval: %d\n", urb->interval);
/* Core halts the channel for Descriptor DMA mode */
if (hsotg->params.dma_desc_enable) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, DWC2_HC_XFER_AHB_ERR);
goto handle_ahberr_done;
}
dwc2_host_complete(hsotg, qtd, -EIO);
handle_ahberr_halt:
/*
* Force a channel halt. Don't call dwc2_halt_channel because that won't
* write to the HCCHARn register in DMA mode to force the halt.
*/
dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
handle_ahberr_done:
disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
<|end_of_text|> | 11,543 |
--- initial
+++ final
@@ -1,82 +1,82 @@
void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg) {
#ifdef DEBUG
struct dwc2_host_chan *chan;
struct dwc2_hcd_urb *urb;
struct dwc2_qtd *qtd;
int num_channels;
u32 np_tx_status;
u32 p_tx_status;
int i;
num_channels = hsotg->params.host_channels;
dev_dbg(hsotg->dev, "\n");
dev_dbg(hsotg->dev, "************************************************************\n");
dev_dbg(hsotg->dev, "HCD State:\n");
dev_dbg(hsotg->dev, " Num channels: %d\n", num_channels);
for (i = 0; i < num_channels; i++) {
chan = hsotg->hc_ptr_array[i];
dev_dbg(hsotg->dev, " Channel %d:\n", i);
dev_dbg(hsotg->dev, " dev_addr: %d, ep_num: %d, ep_is_in: %d\n", chan->dev_addr, chan->ep_num, chan->ep_is_in);
dev_dbg(hsotg->dev, " speed: %d\n", chan->speed);
dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type);
dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet);
dev_dbg(hsotg->dev, " data_pid_start: %d\n", chan->data_pid_start);
dev_dbg(hsotg->dev, " multi_count: %d\n", chan->multi_count);
dev_dbg(hsotg->dev, " xfer_started: %d\n", chan->xfer_started);
dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf);
dev_dbg(hsotg->dev, " xfer_dma: %08lx\n", (unsigned long)chan->xfer_dma);
dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
dev_dbg(hsotg->dev, " xfer_count: %d\n", chan->xfer_count);
dev_dbg(hsotg->dev, " halt_on_queue: %d\n", chan->halt_on_queue);
dev_dbg(hsotg->dev, " halt_pending: %d\n", chan->halt_pending);
dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status);
dev_dbg(hsotg->dev, " do_split: %d\n", chan->do_split);
dev_dbg(hsotg->dev, " complete_split: %d\n", chan->complete_split);
dev_dbg(hsotg->dev, " hub_addr: %d\n", chan->hub_addr);
dev_dbg(hsotg->dev, " hub_port: %d\n", chan->hub_port);
dev_dbg(hsotg->dev, " xact_pos: %d\n", chan->xact_pos);
dev_dbg(hsotg->dev, " requests: %d\n", chan->requests);
dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
if (chan->xfer_started) {
u32 hfnum, hcchar, hctsiz, hcint, hcintmsk;
- hfnum = dwc2_readl(hsotg->regs + HFNUM);
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
- hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(i));
- hcint = dwc2_readl(hsotg->regs + HCINT(i));
- hcintmsk = dwc2_readl(hsotg->regs + HCINTMSK(i));
+ hfnum = dwc2_readl(hsotg, HFNUM);
+ hcchar = dwc2_readl(hsotg, HCCHAR(i));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(i));
+ hcint = dwc2_readl(hsotg, HCINT(i));
+ hcintmsk = dwc2_readl(hsotg, HCINTMSK(i));
dev_dbg(hsotg->dev, " hfnum: 0x%08x\n", hfnum);
dev_dbg(hsotg->dev, " hcchar: 0x%08x\n", hcchar);
dev_dbg(hsotg->dev, " hctsiz: 0x%08x\n", hctsiz);
dev_dbg(hsotg->dev, " hcint: 0x%08x\n", hcint);
dev_dbg(hsotg->dev, " hcintmsk: 0x%08x\n", hcintmsk);
}
if (!(chan->xfer_started && chan->qh)) continue;
list_for_each_entry(qtd, &chan->qh->qtd_list, qtd_list_entry) {
if (!qtd->in_process) break;
urb = qtd->urb;
dev_dbg(hsotg->dev, " URB Info:\n");
dev_dbg(hsotg->dev, " qtd: %p, urb: %p\n", qtd, urb);
if (urb) {
dev_dbg(hsotg->dev, " Dev: %d, EP: %d %s\n", dwc2_hcd_get_dev_addr(&urb->pipe_info), dwc2_hcd_get_ep_num(&urb->pipe_info), dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
dev_dbg(hsotg->dev, " Max packet size: %d\n", dwc2_hcd_get_mps(&urb->pipe_info));
dev_dbg(hsotg->dev, " transfer_buffer: %p\n", urb->buf);
dev_dbg(hsotg->dev, " transfer_dma: %08lx\n", (unsigned long)urb->dma);
dev_dbg(hsotg->dev, " transfer_buffer_length: %d\n", urb->length);
dev_dbg(hsotg->dev, " actual_length: %d\n", urb->actual_length);
}
}
}
dev_dbg(hsotg->dev, " non_periodic_channels: %d\n", hsotg->non_periodic_channels);
dev_dbg(hsotg->dev, " periodic_channels: %d\n", hsotg->periodic_channels);
dev_dbg(hsotg->dev, " periodic_usecs: %d\n", hsotg->periodic_usecs);
- np_tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
+ np_tx_status = dwc2_readl(hsotg, GNPTXSTS);
dev_dbg(hsotg->dev, " NP Tx Req Queue Space Avail: %d\n", (np_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
dev_dbg(hsotg->dev, " NP Tx FIFO Space Avail: %d\n", (np_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
- p_tx_status = dwc2_readl(hsotg->regs + HPTXSTS);
+ p_tx_status = dwc2_readl(hsotg, HPTXSTS);
dev_dbg(hsotg->dev, " P Tx Req Queue Space Avail: %d\n", (p_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
dev_dbg(hsotg->dev, " P Tx FIFO Space Avail: %d\n", (p_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
dwc2_dump_global_registers(hsotg);
dwc2_dump_host_registers(hsotg);
dev_dbg(hsotg->dev, "************************************************************\n");
dev_dbg(hsotg->dev, "\n");
#endif
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
<|end_of_text|> | 11,516 |
--- initial
+++ final
@@ -1,37 +1,37 @@
static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) {
int ret;
/* Clear interrupt */
- dwc2_writel(GINTSTS_WKUPINT, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_WKUPINT, GINTSTS);
dev_dbg(hsotg->dev, "++Resume or Remote Wakeup Detected Interrupt++\n");
dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state);
if (hsotg->lx_state == DWC2_L1) {
dwc2_wakeup_from_lpm_l1(hsotg);
return;
}
if (dwc2_is_device_mode(hsotg)) {
- dev_dbg(hsotg->dev, "DSTS=0x%0x\n", dwc2_readl(hsotg->regs + DSTS));
+ dev_dbg(hsotg->dev, "DSTS=0x%0x\n", dwc2_readl(hsotg, DSTS));
if (hsotg->lx_state == DWC2_L2) {
- u32 dctl = dwc2_readl(hsotg->regs + DCTL);
+ u32 dctl = dwc2_readl(hsotg, DCTL);
/* Clear Remote Wakeup Signaling */
dctl &= ~DCTL_RMTWKUPSIG;
- dwc2_writel(dctl, hsotg->regs + DCTL);
+ dwc2_writel(hsotg, dctl, DCTL);
ret = dwc2_exit_partial_power_down(hsotg, true);
if (ret && (ret != -ENOTSUPP)) dev_err(hsotg->dev, "exit power_down failed\n");
call_gadget(hsotg, resume);
}
/* Change to L0 state */
hsotg->lx_state = DWC2_L0;
} else {
if (hsotg->params.power_down) return;
if (hsotg->lx_state != DWC2_L1) {
- u32 pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
+ u32 pcgcctl = dwc2_readl(hsotg, PCGCTL);
/* Restart the Phy Clock */
pcgcctl &= ~PCGCTL_STOPPCLK;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
mod_timer(&hsotg->wkp_timer, jiffies + msecs_to_jiffies(71));
} else {
/* Change to L0 state */
hsotg->lx_state = DWC2_L0;
}
}
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,434 |
--- initial
+++ final
@@ -1,56 +1,56 @@
static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) {
u32 usbcfg, ggpio, i2cctl;
int retval = 0;
/*
* core_init() is now called on every switch so only call the
* following for the first time through
*/
if (select_phy) {
dev_dbg(hsotg->dev, "FS PHY selected\n");
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
if (!(usbcfg & GUSBCFG_PHYSEL)) {
usbcfg |= GUSBCFG_PHYSEL;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
/* Reset after a PHY select */
retval = dwc2_core_reset(hsotg, false);
if (retval) {
dev_err(hsotg->dev, "%s: Reset failed, aborting", __func__);
return retval;
}
}
if (hsotg->params.activate_stm_fs_transceiver) {
- ggpio = dwc2_readl(hsotg->regs + GGPIO);
+ ggpio = dwc2_readl(hsotg, GGPIO);
if (!(ggpio & GGPIO_STM32_OTG_GCCFG_PWRDWN)) {
dev_dbg(hsotg->dev, "Activating transceiver\n");
/*
* STM32F4x9 uses the GGPIO register as general
* core configuration register.
*/
ggpio |= GGPIO_STM32_OTG_GCCFG_PWRDWN;
- dwc2_writel(ggpio, hsotg->regs + GGPIO);
+ dwc2_writel(hsotg, ggpio, GGPIO);
}
}
}
/*
* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
* do this on HNP Dev/Host mode switches (done in dev_init and
* host_init).
*/
if (dwc2_is_host_mode(hsotg)) dwc2_init_fs_ls_pclk_sel(hsotg);
if (hsotg->params.i2c_enable) {
dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
/* Program GUSBCFG.OtgUtmiFsSel to I2C */
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
/* Program GI2CCTL.I2CEn */
- i2cctl = dwc2_readl(hsotg->regs + GI2CCTL);
+ i2cctl = dwc2_readl(hsotg, GI2CCTL);
i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
i2cctl &= ~GI2CCTL_I2CEN;
- dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
+ dwc2_writel(hsotg, i2cctl, GI2CCTL);
i2cctl |= GI2CCTL_I2CEN;
- dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
+ dwc2_writel(hsotg, i2cctl, GI2CCTL);
}
return retval;
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,500 |
--- initial
+++ final
@@ -1,21 +1,21 @@
static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) {
u32 usbcfg;
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
if (hsotg->params.otg_cap == DWC2_CAP_PARAM_HNP_SRP_CAPABLE) usbcfg |= GUSBCFG_HNPCAP;
if (hsotg->params.otg_cap != DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) usbcfg |= GUSBCFG_SRPCAP;
break;
case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
if (hsotg->params.otg_cap != DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) usbcfg |= GUSBCFG_SRPCAP;
break;
case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
default: break;
}
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,502 |
--- initial
+++ final
@@ -1,13 +1,13 @@
static void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg) {
u32 intmsk;
dev_dbg(hsotg->dev, "%s()\n", __func__);
/* Disable all interrupts */
- dwc2_writel(0, hsotg->regs + GINTMSK);
- dwc2_writel(0, hsotg->regs + HAINTMSK);
+ dwc2_writel(hsotg, 0, GINTMSK);
+ dwc2_writel(hsotg, 0, HAINTMSK);
/* Enable the common interrupts */
dwc2_enable_common_interrupts(hsotg);
/* Enable host mode interrupts without disturbing common interrupts */
- intmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ intmsk = dwc2_readl(hsotg, GINTMSK);
intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
- dwc2_writel(intmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, intmsk, GINTMSK);
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,499 |
--- initial
+++ final
@@ -1,113 +1,113 @@
static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, int chnum, struct dwc2_qtd *qtd) {
u32 hcintmsk;
int out_nak_enh = 0;
if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: DMA Channel Halted--\n", chnum);
/*
* For core with OUT NAK enhancement, the flow for high-speed
* CONTROL/BULK OUT is handled a little differently
*/
if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in && (chan->ep_type == USB_ENDPOINT_XFER_CONTROL || chan->ep_type == USB_ENDPOINT_XFER_BULK)) { out_nak_enh = 1; }
}
if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE || (chan->halt_status == DWC2_HC_XFER_AHB_ERR && !hsotg->params.dma_desc_enable)) {
if (hsotg->params.dma_desc_enable)
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, chan->halt_status);
else
/*
* Just release the channel. A dequeue can happen on a
* transfer timeout. In the case of an AHB Error, the
* channel was forced to halt because there's no way to
* gracefully recover.
*/
dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
return;
}
- hcintmsk = dwc2_readl(hsotg->regs + HCINTMSK(chnum));
+ hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
if (chan->hcint & HCINTMSK_XFERCOMPL) {
/*
* Todo: This is here because of a possible hardware bug. Spec
* says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
* interrupt w/ACK bit set should occur, but I only see the
* XFERCOMP bit, even with it masked out. This is a workaround
* for that behavior. Should fix this when hardware is fixed.
*/
if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in) dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
} else if (chan->hcint & HCINTMSK_STALL) {
dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_XACTERR) && !hsotg->params.dma_desc_enable) {
if (out_nak_enh) {
if (chan->hcint & (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
dev_vdbg(hsotg->dev, "XactErr with NYET/NAK/ACK\n");
qtd->error_count = 0;
} else {
dev_vdbg(hsotg->dev, "XactErr without NYET/NAK/ACK\n");
}
}
/*
* Must handle xacterr before nak or ack. Could get a xacterr
* at the same time as either of these on a BULK/CONTROL OUT
* that started with a PING. The xacterr takes precedence.
*/
dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_XCS_XACT) && hsotg->params.dma_desc_enable) {
dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_AHBERR) && hsotg->params.dma_desc_enable) {
dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
} else if (chan->hcint & HCINTMSK_BBLERR) {
dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
} else if (chan->hcint & HCINTMSK_FRMOVRUN) {
dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
} else if (!out_nak_enh) {
if (chan->hcint & HCINTMSK_NYET) {
/*
* Must handle nyet before nak or ack. Could get a nyet
* at the same time as either of those on a BULK/CONTROL
* OUT that started with a PING. The nyet takes
* precedence.
*/
dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_NAK) && !(hcintmsk & HCINTMSK_NAK)) {
/*
* If nak is not masked, it's because a non-split IN
* transfer is in an error state. In that case, the nak
* is handled by the nak interrupt handler, not here.
* Handle nak here for BULK/CONTROL OUT transfers, which
* halt on a NAK to allow rewinding the buffer pointer.
*/
dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_ACK) && !(hcintmsk & HCINTMSK_ACK)) {
/*
* If ack is not masked, it's because a non-split IN
* transfer is in an error state. In that case, the ack
* is handled by the ack interrupt handler, not here.
* Handle ack here for split transfers. Start splits
* halt on ACK.
*/
dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
} else {
if (chan->ep_type == USB_ENDPOINT_XFER_INT || chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
/*
* A periodic transfer halted with no other
* channel interrupts set. Assume it was halted
* by the core because it could not be completed
* in its scheduled (micro)frame.
*/
dev_dbg(hsotg->dev, "%s: Halt channel %d (assume incomplete periodic transfer)\n", __func__, chnum);
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_PERIODIC_INCOMPLETE);
} else {
dev_err(hsotg->dev, "%s: Channel %d - ChHltd set, but reason is unknown\n", __func__, chnum);
- dev_err(hsotg->dev, "hcint 0x%08x, intsts 0x%08x\n", chan->hcint, dwc2_readl(hsotg->regs + GINTSTS));
+ dev_err(hsotg->dev, "hcint 0x%08x, intsts 0x%08x\n", chan->hcint, dwc2_readl(hsotg, GINTSTS));
goto error;
}
}
} else {
dev_info(hsotg->dev, "NYET/NAK/ACK/other in non-error case, 0x%08x\n", chan->hcint);
error:
/* Failthrough: use 3-strikes rule */
qtd->error_count++;
dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd, DWC2_HC_XFER_XACT_ERR);
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
}
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
<|end_of_text|> | 11,544 |
--- initial
+++ final
@@ -1,45 +1,45 @@
irqreturn_t dwc2_handle_common_intr(int irq, void *dev) {
struct dwc2_hsotg *hsotg = dev;
u32 gintsts;
irqreturn_t retval = IRQ_NONE;
spin_lock(&hsotg->lock);
if (!dwc2_is_controller_alive(hsotg)) {
dev_warn(hsotg->dev, "Controller is dead\n");
goto out;
}
/* Reading current frame number value in device or host modes. */
if (dwc2_is_device_mode(hsotg))
- hsotg->frame_number = (dwc2_readl(hsotg->regs + DSTS) & DSTS_SOFFN_MASK) >> DSTS_SOFFN_SHIFT;
+ hsotg->frame_number = (dwc2_readl(hsotg, DSTS) & DSTS_SOFFN_MASK) >> DSTS_SOFFN_SHIFT;
else
- hsotg->frame_number = (dwc2_readl(hsotg->regs + HFNUM) & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
+ hsotg->frame_number = (dwc2_readl(hsotg, HFNUM) & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
gintsts = dwc2_read_common_intr(hsotg);
if (gintsts & ~GINTSTS_PRTINT) retval = IRQ_HANDLED;
/* In case of hibernated state gintsts must not work */
if (hsotg->hibernated) {
dwc2_handle_gpwrdn_intr(hsotg);
retval = IRQ_HANDLED;
goto out;
}
if (gintsts & GINTSTS_MODEMIS) dwc2_handle_mode_mismatch_intr(hsotg);
if (gintsts & GINTSTS_OTGINT) dwc2_handle_otg_intr(hsotg);
if (gintsts & GINTSTS_CONIDSTSCHNG) dwc2_handle_conn_id_status_change_intr(hsotg);
if (gintsts & GINTSTS_DISCONNINT) dwc2_handle_disconnect_intr(hsotg);
if (gintsts & GINTSTS_SESSREQINT) dwc2_handle_session_req_intr(hsotg);
if (gintsts & GINTSTS_WKUPINT) dwc2_handle_wakeup_detected_intr(hsotg);
if (gintsts & GINTSTS_USBSUSP) dwc2_handle_usb_suspend_intr(hsotg);
if (gintsts & GINTSTS_LPMTRANRCVD) dwc2_handle_lpm_intr(hsotg);
if (gintsts & GINTSTS_PRTINT) {
/*
* The port interrupt occurs while in device mode with HPRT0
* Port Enable/Disable
*/
if (dwc2_is_device_mode(hsotg)) {
dev_dbg(hsotg->dev, " --Port interrupt received in Device mode--\n");
dwc2_handle_usb_port_intr(hsotg);
retval = IRQ_HANDLED;
}
}
out:
spin_unlock(&hsotg->lock);
return retval;
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
<|end_of_text|> | 11,424 |
--- initial
+++ final
@@ -1,9 +1,9 @@
static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints) {
- u32 gsintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
u32 new_gsintmsk;
new_gsintmsk = gsintmsk | ints;
if (new_gsintmsk != gsintmsk) {
dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
- dwc2_writel(new_gsintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
}
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,461 |
--- initial
+++ final
@@ -1,6 +1,6 @@
bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg) {
- if (dwc2_readl(hsotg->regs + GSNPSID) == 0xffffffff)
+ if (dwc2_readl(hsotg, GSNPSID) == 0xffffffff)
return false;
else
return true;
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
<|end_of_text|> | 11,420 |
--- initial
+++ final
@@ -1,44 +1,44 @@
static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg, struct usb_ctrlrequest *ctrl) {
struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
int ret = 0;
u32 dcfg;
dev_dbg(hsotg->dev, "ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n", ctrl->bRequestType, ctrl->bRequest, ctrl->wValue, ctrl->wIndex, ctrl->wLength);
if (ctrl->wLength == 0) {
ep0->dir_in = 1;
hsotg->ep0_state = DWC2_EP0_STATUS_IN;
} else if (ctrl->bRequestType & USB_DIR_IN) {
ep0->dir_in = 1;
hsotg->ep0_state = DWC2_EP0_DATA_IN;
} else {
ep0->dir_in = 0;
hsotg->ep0_state = DWC2_EP0_DATA_OUT;
}
if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
switch (ctrl->bRequest) {
case USB_REQ_SET_ADDRESS:
hsotg->connected = 1;
- dcfg = dwc2_readl(hsotg->regs + DCFG);
+ dcfg = dwc2_readl(hsotg, DCFG);
dcfg &= ~DCFG_DEVADDR_MASK;
dcfg |= (le16_to_cpu(ctrl->wValue) << DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
- dwc2_writel(dcfg, hsotg->regs + DCFG);
+ dwc2_writel(hsotg, dcfg, DCFG);
dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
return;
case USB_REQ_GET_STATUS: ret = dwc2_hsotg_process_req_status(hsotg, ctrl); break;
case USB_REQ_CLEAR_FEATURE:
case USB_REQ_SET_FEATURE: ret = dwc2_hsotg_process_req_feature(hsotg, ctrl); break;
}
}
/* as a fallback, try delivering it to the driver to deal with */
if (ret == 0 && hsotg->driver) {
spin_unlock(&hsotg->lock);
ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
spin_lock(&hsotg->lock);
if (ret < 0) dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
}
/*
* the request is either unhandlable, or is not formatted correctly
* so respond with a STALL for the status stage to indicate failure.
*/
if (ret < 0) dwc2_hsotg_stall_ep0(hsotg);
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,474 |
--- initial
+++ final
@@ -1,5 +1,5 @@
static void dwc2_handle_mode_mismatch_intr(struct dwc2_hsotg *hsotg) {
/* Clear interrupt */
- dwc2_writel(GINTSTS_MODEMIS, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_MODEMIS, GINTSTS);
dev_warn(hsotg->dev, "Mode Mismatch Interrupt: currently in %s mode\n", dwc2_is_host_mode(hsotg) ? "Host" : "Device");
}<sep>@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,429 |
--- initial
+++ final
@@ -1,15 +1,15 @@
static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg) {
u32 gintsts;
u32 gintmsk;
u32 gahbcfg;
u32 gintmsk_common = GINTMSK_COMMON;
- gintsts = dwc2_readl(hsotg->regs + GINTSTS);
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
- gahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
+ gintsts = dwc2_readl(hsotg, GINTSTS);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gahbcfg = dwc2_readl(hsotg, GAHBCFG);
/* If any common interrupts set */
if (gintsts & gintmsk_common) dev_dbg(hsotg->dev, "gintsts=%08x gintmsk=%08x\n", gintsts, gintmsk);
if (gahbcfg & GAHBCFG_GLBL_INTR_EN)
return gintsts & gintmsk & gintmsk_common;
else
return 0;
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
<|end_of_text|> | 11,435 |
--- initial
+++ final
@@ -1,15 +1,15 @@
static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg) {
u32 hcfg, val;
if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && hsotg->params.ulpi_fs_ls) || hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
/* Full speed PHY */
val = HCFG_FSLSPCLKSEL_48_MHZ;
} else {
/* High speed PHY running at full speed or high speed */
val = HCFG_FSLSPCLKSEL_30_60_MHZ;
}
dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
- hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hcfg = dwc2_readl(hsotg, HCFG);
hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
- dwc2_writel(hcfg, hsotg->regs + HCFG);
+ dwc2_writel(hsotg, hcfg, HCFG);
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,530 |
--- initial
+++ final
@@ -1,33 +1,33 @@
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) {
u32 hcchar;
u32 hctsiz = 0;
if (chan->do_ping) hctsiz |= TSIZ_DOPNG;
if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) dwc2_set_pid_isoc(chan);
/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & TSIZ_SC_MC_PID_MASK;
/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
/* Non-zero only for high-speed interrupt endpoints */
hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
if (dbg_hc(chan)) {
dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, chan->hc_num);
dev_vdbg(hsotg->dev, " Start PID: %d\n", chan->data_pid_start);
dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
}
- dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
+ dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr, chan->desc_list_sz, DMA_TO_DEVICE);
- dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num));
+ dwc2_writel(hsotg, chan->desc_list_addr, HCDMA(chan->hc_num));
if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n", &chan->desc_list_addr, chan->hc_num);
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
+ hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
hcchar &= ~HCCHAR_MULTICNT_MASK;
hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT & HCCHAR_MULTICNT_MASK;
if (hcchar & HCCHAR_CHDIS) dev_warn(hsotg->dev, "%s: chdis set, channel %d, hcchar 0x%08x\n", __func__, chan->hc_num, hcchar);
/* Set host channel enable after all other setup is complete */
hcchar |= HCCHAR_CHENA;
hcchar &= ~HCCHAR_CHDIS;
if (dbg_hc(chan)) dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", (hcchar & HCCHAR_MULTICNT_MASK) >> HCCHAR_MULTICNT_SHIFT);
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
+ dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, chan->hc_num);
chan->xfer_started = 1;
chan->requests++;
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,513 |
--- initial
+++ final
@@ -1,11 +1,11 @@
static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg) {
struct dwc2_hw_params *hw = &hsotg->hw_params;
u32 gnptxfsiz;
u32 hptxfsiz;
if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) return;
dwc2_force_mode(hsotg, true);
- gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
- hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
+ gnptxfsiz = dwc2_readl(hsotg, GNPTXFSIZ);
+ hptxfsiz = dwc2_readl(hsotg, HPTXFSIZ);
hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
<|end_of_text|> | 11,560 |
--- initial
+++ final
@@ -1,53 +1,53 @@
static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) {
unsigned int ep;
unsigned int addr;
int timeout;
u32 val;
u32 *txfsz = hsotg->params.g_tx_fifo_size;
/* Reset fifo map if not correctly cleared during previous session */
WARN_ON(hsotg->fifo_map);
hsotg->fifo_map = 0;
/* set RX/NPTX FIFO sizes */
- dwc2_writel(hsotg->params.g_rx_fifo_size, hsotg->regs + GRXFSIZ);
- dwc2_writel((hsotg->params.g_rx_fifo_size << FIFOSIZE_STARTADDR_SHIFT) | (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT), hsotg->regs + GNPTXFSIZ);
+ dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ);
+ dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size << FIFOSIZE_STARTADDR_SHIFT) | (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT), GNPTXFSIZ);
/*
* arange all the rest of the TX FIFOs, as some versions of this
* block have overlapping default addresses. This also ensures
* that if the settings have been changed, then they are set to
* known values.
*/
/* start at the end of the GNPTXFSIZ, rounded up */
addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;
/*
* Configure fifos sizes from provided configuration and assign
* them to endpoints dynamically according to maxpacket size value of
* given endpoint.
*/
for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
if (!txfsz[ep]) continue;
val = addr;
val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem, "insufficient fifo memory");
addr += txfsz[ep];
- dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
- val = dwc2_readl(hsotg->regs + DPTXFSIZN(ep));
+ dwc2_writel(hsotg, val, DPTXFSIZN(ep));
+ val = dwc2_readl(hsotg, DPTXFSIZN(ep));
}
- dwc2_writel(hsotg->hw_params.total_fifo_size | addr << GDFIFOCFG_EPINFOBASE_SHIFT, hsotg->regs + GDFIFOCFG);
+ dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size | addr << GDFIFOCFG_EPINFOBASE_SHIFT, GDFIFOCFG);
/*
* according to p428 of the design guide, we need to ensure that
* all fifos are flushed before continuing
*/
- dwc2_writel(GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH, hsotg->regs + GRSTCTL);
+ dwc2_writel(hsotg, GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH, GRSTCTL);
/* wait until the fifos are both flushed */
timeout = 100;
while (1) {
- val = dwc2_readl(hsotg->regs + GRSTCTL);
+ val = dwc2_readl(hsotg, GRSTCTL);
if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0) break;
if (--timeout == 0) {
dev_err(hsotg->dev, "%s: timeout flushing fifos (GRSTCTL=%08x)\n", __func__, val);
break;
}
udelay(1);
}
dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,470 |
--- initial
+++ final
@@ -1,14 +1,14 @@
void dwc2_hcd_start(struct dwc2_hsotg *hsotg) {
u32 hprt0;
if (hsotg->op_state == OTG_STATE_B_HOST) {
/*
* Reset the port. During a HNP mode switch the reset
* needs to occur within 1ms and have a duration of at
* least 50ms.
*/
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_RST;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
}
queue_delayed_work(hsotg->wq_otg, &hsotg->start_work, msecs_to_jiffies(50));
}<sep>@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,524 |
--- initial
+++ final
@@ -1,24 +1,24 @@
int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg) {
struct dwc2_gregs_backup *gr;
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Restore global regs */
gr = &hsotg->gr_backup;
if (!gr->valid) {
dev_err(hsotg->dev, "%s: no global registers to restore\n", __func__);
return -EINVAL;
}
gr->valid = false;
- dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
- dwc2_writel(gr->gotgctl, hsotg->regs + GOTGCTL);
- dwc2_writel(gr->gintmsk, hsotg->regs + GINTMSK);
- dwc2_writel(gr->gusbcfg, hsotg->regs + GUSBCFG);
- dwc2_writel(gr->gahbcfg, hsotg->regs + GAHBCFG);
- dwc2_writel(gr->grxfsiz, hsotg->regs + GRXFSIZ);
- dwc2_writel(gr->gnptxfsiz, hsotg->regs + GNPTXFSIZ);
- dwc2_writel(gr->gdfifocfg, hsotg->regs + GDFIFOCFG);
- dwc2_writel(gr->pcgcctl1, hsotg->regs + PCGCCTL1);
- dwc2_writel(gr->glpmcfg, hsotg->regs + GLPMCFG);
- dwc2_writel(gr->pcgcctl, hsotg->regs + PCGCTL);
- dwc2_writel(gr->gi2cctl, hsotg->regs + GI2CCTL);
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
+ dwc2_writel(hsotg, gr->gotgctl, GOTGCTL);
+ dwc2_writel(hsotg, gr->gintmsk, GINTMSK);
+ dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
+ dwc2_writel(hsotg, gr->gahbcfg, GAHBCFG);
+ dwc2_writel(hsotg, gr->grxfsiz, GRXFSIZ);
+ dwc2_writel(hsotg, gr->gnptxfsiz, GNPTXFSIZ);
+ dwc2_writel(hsotg, gr->gdfifocfg, GDFIFOCFG);
+ dwc2_writel(hsotg, gr->pcgcctl1, PCGCCTL1);
+ dwc2_writel(hsotg, gr->glpmcfg, GLPMCFG);
+ dwc2_writel(hsotg, gr->pcgcctl, PCGCTL);
+ dwc2_writel(hsotg, gr->gi2cctl, GI2CCTL);
return 0;
}<sep>@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,423 |
--- initial
+++ final
@@ -1,8 +1,8 @@
int dwc2_hsotg_wait_bit_clear(struct dwc2_hsotg *hsotg, u32 offset, u32 mask, u32 timeout) {
u32 i;
for (i = 0; i < timeout; i++) {
- if (!(dwc2_readl(hsotg->regs + offset) & mask)) return 0;
+ if (!(dwc2_readl(hsotg, offset) & mask)) return 0;
udelay(1);
}
return -ETIMEDOUT;
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
<|end_of_text|> | 11,417 |
--- initial
+++ final
@@ -1,12 +1,12 @@
void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num) {
u32 greset;
dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
/* Wait for AHB master IDLE state */
if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 10000)) dev_warn(hsotg->dev, "%s: HANG! AHB Idle GRSCTL\n", __func__);
greset = GRSTCTL_TXFFLSH;
greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
- dwc2_writel(greset, hsotg->regs + GRSTCTL);
+ dwc2_writel(hsotg, greset, GRSTCTL);
if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 10000)) dev_warn(hsotg->dev, "%s: HANG! timeout GRSTCTL GRSTCTL_TXFFLSH\n", __func__);
/* Wait for at least 3 PHY Clocks */
udelay(1);
}<sep>@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,414 |
--- initial
+++ final
@@ -1,33 +1,33 @@
static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg) {
u32 glpmcfg;
u32 i = 0;
if (hsotg->lx_state != DWC2_L1) {
dev_err(hsotg->dev, "Core isn't in DWC2_L1 state\n");
return;
}
- glpmcfg = dwc2_readl(hsotg->regs + GLPMCFG);
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
if (dwc2_is_device_mode(hsotg)) {
dev_dbg(hsotg->dev, "Exit from L1 state\n");
glpmcfg &= ~GLPMCFG_ENBLSLPM;
glpmcfg &= ~GLPMCFG_HIRD_THRES_EN;
- dwc2_writel(glpmcfg, hsotg->regs + GLPMCFG);
+ dwc2_writel(hsotg, glpmcfg, GLPMCFG);
do {
- glpmcfg = dwc2_readl(hsotg->regs + GLPMCFG);
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
if (!(glpmcfg & (GLPMCFG_COREL1RES_MASK | GLPMCFG_L1RESUMEOK | GLPMCFG_SLPSTS))) break;
udelay(1);
} while (++i < 200);
if (i == 200) {
dev_err(hsotg->dev, "Failed to exit L1 sleep state in 200us.\n");
return;
}
dwc2_gadget_init_lpm(hsotg);
} else {
/* TODO */
dev_err(hsotg->dev, "Host side LPM is not supported.\n");
return;
}
/* Change to L0 state */
hsotg->lx_state = DWC2_L0;
/* Inform gadget to exit from L1 */
call_gadget(hsotg, resume);
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,436 |
--- initial
+++ final
@@ -1,53 +1,53 @@
static int _dwc2_hcd_resume(struct usb_hcd *hcd) {
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&hsotg->lock, flags);
if (dwc2_is_device_mode(hsotg)) goto unlock;
if (hsotg->lx_state != DWC2_L2) goto unlock;
if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL) {
hsotg->lx_state = DWC2_L0;
goto unlock;
}
/*
* Set HW accessible bit before powering on the controller
* since an interrupt may rise.
*/
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
/*
* Enable power if not already done.
* This must not be spinlocked since duration
* of this call is unknown.
*/
if (!IS_ERR_OR_NULL(hsotg->uphy)) {
spin_unlock_irqrestore(&hsotg->lock, flags);
usb_phy_set_suspend(hsotg->uphy, false);
spin_lock_irqsave(&hsotg->lock, flags);
}
/* Exit partial_power_down */
ret = dwc2_exit_partial_power_down(hsotg, true);
if (ret && (ret != -ENOTSUPP)) dev_err(hsotg->dev, "exit partial_power_down failed\n");
hsotg->lx_state = DWC2_L0;
spin_unlock_irqrestore(&hsotg->lock, flags);
if (hsotg->bus_suspended) {
spin_lock_irqsave(&hsotg->lock, flags);
hsotg->flags.b.port_suspend_change = 1;
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_port_resume(hsotg);
} else {
dwc2_vbus_supply_init(hsotg);
/* Wait for controller to correctly update D+/D- level */
usleep_range(3000, 5000);
/*
* Clear Port Enable and Port Status changes.
* Enable Port Power.
*/
- dwc2_writel(HPRT0_PWR | HPRT0_CONNDET | HPRT0_ENACHG, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, HPRT0_PWR | HPRT0_CONNDET | HPRT0_ENACHG, HPRT0);
/* Wait for controller to detect Port Connect */
usleep_range(5000, 7000);
}
return ret;
unlock:
spin_unlock_irqrestore(&hsotg->lock, flags);
return ret;
}<sep>@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,486 |
--- initial
+++ final
@@ -1,4 +1,4 @@
static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) {
/* set the soft-disconnect bit */
- dwc2_set_bit(hsotg->regs + DCTL, DCTL_SFTDISCON);
+ dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
}<sep>@@
expression e1,e2,hsotg;
@@
- dwc2_set_bit(hsotg->regs + e1, e2)
+ dwc2_set_bit(hsotg, e1, e2)
<|end_of_text|> | 11,456 |
--- initial
+++ final
@@ -1,39 +1,39 @@
static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex) {
unsigned long flags;
u32 hprt0;
u32 pcgctl;
u32 gotgctl;
dev_dbg(hsotg->dev, "%s()\n", __func__);
spin_lock_irqsave(&hsotg->lock, flags);
if (windex == hsotg->otg_port && dwc2_host_is_b_hnp_enabled(hsotg)) {
- gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
gotgctl |= GOTGCTL_HSTSETHNPEN;
- dwc2_writel(gotgctl, hsotg->regs + GOTGCTL);
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
hsotg->op_state = OTG_STATE_A_SUSPEND;
}
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_SUSP;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
hsotg->bus_suspended = true;
/*
* If power_down is supported, Phy clock will be suspended
* after registers are backuped.
*/
if (!hsotg->params.power_down) {
/* Suspend the Phy Clock */
- pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl |= PCGCTL_STOPPCLK;
- dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
udelay(10);
}
/* For HNP the bus must be suspended for at least 200ms */
if (dwc2_host_is_b_hnp_enabled(hsotg)) {
- pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl &= ~PCGCTL_STOPPCLK;
- dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
spin_unlock_irqrestore(&hsotg->lock, flags);
msleep(200);
} else {
spin_unlock_irqrestore(&hsotg->lock, flags);
}
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,533 |
--- initial
+++ final
@@ -1,65 +1,65 @@
static void dwc2_release_channel(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, struct dwc2_qtd *qtd, enum dwc2_halt_status halt_status) {
enum dwc2_transaction_type tr_type;
u32 haintmsk;
int free_qtd = 0;
if (dbg_hc(chan)) dev_vdbg(hsotg->dev, " %s: channel %d, halt_status %d\n", __func__, chan->hc_num, halt_status);
switch (halt_status) {
case DWC2_HC_XFER_URB_COMPLETE: free_qtd = 1; break;
case DWC2_HC_XFER_AHB_ERR:
case DWC2_HC_XFER_STALL:
case DWC2_HC_XFER_BABBLE_ERR: free_qtd = 1; break;
case DWC2_HC_XFER_XACT_ERR:
if (qtd && qtd->error_count >= 3) {
dev_vdbg(hsotg->dev, " Complete URB with transaction error\n");
free_qtd = 1;
dwc2_host_complete(hsotg, qtd, -EPROTO);
}
break;
case DWC2_HC_XFER_URB_DEQUEUE:
/*
* The QTD has already been removed and the QH has been
* deactivated. Don't want to do anything except release the
* host channel and try to queue more transfers.
*/
goto cleanup;
case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
dev_vdbg(hsotg->dev, " Complete URB with I/O error\n");
free_qtd = 1;
dwc2_host_complete(hsotg, qtd, -EIO);
break;
case DWC2_HC_XFER_NO_HALT_STATUS:
default: break;
}
dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);
cleanup:
/*
* Release the host channel for use by other transfers. The cleanup
* function clears the channel interrupt enables and conditions, so
* there's no need to clear the Channel Halted interrupt separately.
*/
if (!list_empty(&chan->hc_list_entry)) list_del(&chan->hc_list_entry);
dwc2_hc_cleanup(hsotg, chan);
list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
if (hsotg->params.uframe_sched) {
hsotg->available_host_channels++;
} else {
switch (chan->ep_type) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK: hsotg->non_periodic_channels--; break;
default:
/*
* Don't release reservations for periodic channels
* here. That's done when a periodic transfer is
* descheduled (i.e. when the QH is removed from the
* periodic schedule).
*/
break;
}
}
- haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
+ haintmsk = dwc2_readl(hsotg, HAINTMSK);
haintmsk &= ~(1 << chan->hc_num);
- dwc2_writel(haintmsk, hsotg->regs + HAINTMSK);
+ dwc2_writel(hsotg, haintmsk, HAINTMSK);
/* Try to queue more transfers now that there's a free channel */
tr_type = dwc2_hcd_select_transactions(hsotg);
if (tr_type != DWC2_TRANSACTION_NONE) dwc2_hcd_queue_transactions(hsotg, tr_type);
}<sep>@@
expression hsotg,e;
@@
- dwc2_readl(hsotg->regs + e)
+ dwc2_readl(hsotg, e)
@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,550 |
--- initial
+++ final
@@ -1,13 +1,13 @@
void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) {
u32 hcintmsk;
chan->xfer_started = 0;
list_del_init(&chan->split_order_list_entry);
/*
* Clear channel interrupt enables and any unhandled channel interrupt
* conditions
*/
- dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
+ dwc2_writel(hsotg, 0, HCINTMSK(chan->hc_num));
hcintmsk = 0xffffffff;
hcintmsk &= ~HCINTMSK_RESERVED14_31;
- dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
+ dwc2_writel(hsotg, hcintmsk, HCINT(chan->hc_num));
}<sep>@@
expression hsotg,e,v;
@@
- dwc2_writel(v, hsotg->regs + e)
+ dwc2_writel(hsotg, v, e)
<|end_of_text|> | 11,503 |
--- initial
+++ final
@@ -1,25 +1,24 @@
static int tg3_alloc_consistent(struct tg3 *tp) {
- tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) * (TG3_RX_RING_SIZE + TG3_RX_JUMBO_RING_SIZE)) + (sizeof(struct tx_ring_info) * TG3_TX_RING_SIZE), GFP_KERNEL);
+ tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) * (TG3_RX_RING_SIZE + TG3_RX_JUMBO_RING_SIZE)) + (sizeof(struct tx_ring_info) * TG3_TX_RING_SIZE), GFP_KERNEL);
if (!tp->rx_std_buffers) return -ENOMEM;
- memset(tp->rx_std_buffers, 0, (sizeof(struct ring_info) * (TG3_RX_RING_SIZE + TG3_RX_JUMBO_RING_SIZE)) + (sizeof(struct tx_ring_info) * TG3_TX_RING_SIZE));
tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
tp->tx_buffers = (struct tx_ring_info *)&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, &tp->rx_std_mapping);
if (!tp->rx_std) goto err_out;
tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, &tp->rx_jumbo_mapping);
if (!tp->rx_jumbo) goto err_out;
tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), &tp->rx_rcb_mapping);
if (!tp->rx_rcb) goto err_out;
tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES, &tp->tx_desc_mapping);
if (!tp->tx_ring) goto err_out;
tp->hw_status = pci_alloc_consistent(tp->pdev, TG3_HW_STATUS_SIZE, &tp->status_mapping);
if (!tp->hw_status) goto err_out;
tp->hw_stats = pci_alloc_consistent(tp->pdev, sizeof(struct tg3_hw_stats), &tp->stats_mapping);
if (!tp->hw_stats) goto err_out;
memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
return 0;
err_out:
tg3_free_consistent(tp);
return -ENOMEM;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 548 |
--- initial
+++ final
@@ -1,50 +1,49 @@
static int wl_iw_get_aplist(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) {
wl_scan_results_t *list;
struct sockaddr *addr = (struct sockaddr *)extra;
struct iw_quality qual[IW_MAX_AP];
wl_bss_info_t *bi = NULL;
int error, i;
uint buflen = dwrq->length;
WL_TRACE("%s: SIOCGIWAPLIST\n", dev->name);
if (!extra) return -EINVAL;
- list = kmalloc(buflen, GFP_KERNEL);
+ list = kzalloc(buflen, GFP_KERNEL);
if (!list) return -ENOMEM;
- memset(list, 0, buflen);
list->buflen = cpu_to_le32(buflen);
error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen);
if (error) {
WL_ERROR("%d: Scan results error %d\n", __LINE__, error);
kfree(list);
return error;
}
list->buflen = le32_to_cpu(list->buflen);
list->version = le32_to_cpu(list->version);
list->count = le32_to_cpu(list->count);
if (list->version != WL_BSS_INFO_VERSION) {
WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n", __func__, list->version);
kfree(list);
return -EINVAL;
}
for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
bi = bi ? (wl_bss_info_t *)((unsigned long)bi + le32_to_cpu(bi->length)) : list->bss_info;
ASSERT(((unsigned long)bi + le32_to_cpu(bi->length)) <= ((unsigned long)list + buflen));
if (!(le16_to_cpu(bi->capability) & WLAN_CAPABILITY_ESS)) continue;
memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETH_ALEN);
addr[dwrq->length].sa_family = ARPHRD_ETHER;
qual[dwrq->length].qual = rssi_to_qual(le16_to_cpu(bi->RSSI));
qual[dwrq->length].level = 0x100 + le16_to_cpu(bi->RSSI);
qual[dwrq->length].noise = 0x100 + bi->phy_noise;
#if WIRELESS_EXT > 18
qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
#else
qual[dwrq->length].updated = 7;
#endif
dwrq->length++;
}
kfree(list);
if (dwrq->length) {
memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
dwrq->flags = 1;
}
return 0;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 600 |
--- initial
+++ final
@@ -1,16 +1,15 @@
static int ac6_seq_open(struct inode *inode, struct file *file) {
struct seq_file *seq;
int rc = -ENOMEM;
- struct ac6_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct ac6_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s) goto out;
rc = seq_open(file, &ac6_seq_ops);
if (rc) goto out_kfree;
seq = file->private_data;
seq->private = s;
- memset(s, 0, sizeof(*s));
out:
return rc;
out_kfree:
kfree(s);
goto out;
}<sep>@@
expression e1,e2,r;
statement S;
identifier e3,print1,print2,print3;
constant char[] c1;
constant char[] c2;
constant char[] c3;
type T;
@@
T e3 =
- kmalloc
+ kzalloc
(e1, e2);
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
(
r = e3;
|
dev_set_drvdata(r,e3);
)
<... when != e3
when != r
(
print3(...,c3,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 530 |
--- initial
+++ final
@@ -1,87 +1,86 @@
int save_vga(struct vgastate *state) {
struct regstate *saved;
- saved = kmalloc(sizeof(struct regstate), GFP_KERNEL);
+ saved = kzalloc(sizeof(struct regstate), GFP_KERNEL);
if (saved == NULL) return 1;
- memset(saved, 0, sizeof(struct regstate));
state->vidstate = (void *)saved;
if (state->flags & VGA_SAVE_CMAP) {
saved->vga_cmap = vmalloc(768);
if (!saved->vga_cmap) {
vga_cleanup(state);
return 1;
}
save_vga_cmap(state);
}
if (state->flags & VGA_SAVE_MODE) {
int total;
if (state->num_attr < 21) state->num_attr = 21;
if (state->num_crtc < 25) state->num_crtc = 25;
if (state->num_gfx < 9) state->num_gfx = 9;
if (state->num_seq < 5) state->num_seq = 5;
total = state->num_attr + state->num_crtc + state->num_gfx + state->num_seq;
saved->attr = vmalloc(total);
if (!saved->attr) {
vga_cleanup(state);
return 1;
}
saved->crtc = saved->attr + state->num_attr;
saved->gfx = saved->crtc + state->num_crtc;
saved->seq = saved->gfx + state->num_gfx;
save_vga_mode(state);
}
if (state->flags & VGA_SAVE_FONTS) {
void __iomem *fbbase;
/* exit if window is less than 32K */
if (state->memsize && state->memsize < 4 * 8192) {
vga_cleanup(state);
return 1;
}
if (!state->memsize) state->memsize = 8 * 8192;
if (!state->membase) state->membase = 0xA0000;
fbbase = ioremap(state->membase, state->memsize);
if (!fbbase) {
vga_cleanup(state);
return 1;
}
/*
* save only first 32K used by vgacon
*/
if (state->flags & VGA_SAVE_FONT0) {
saved->vga_font0 = vmalloc(4 * 8192);
if (!saved->vga_font0) {
iounmap(fbbase);
vga_cleanup(state);
return 1;
}
}
/*
* largely unused, but if required by the caller
* we'll just save everything.
*/
if (state->flags & VGA_SAVE_FONT1) {
saved->vga_font1 = vmalloc(state->memsize);
if (!saved->vga_font1) {
iounmap(fbbase);
vga_cleanup(state);
return 1;
}
}
/*
* Save 8K at plane0[0], and 8K at plane1[16K]
*/
if (state->flags & VGA_SAVE_TEXT) {
saved->vga_text = vmalloc(8192 * 2);
if (!saved->vga_text) {
iounmap(fbbase);
vga_cleanup(state);
return 1;
}
}
save_vga_text(state, fbbase);
iounmap(fbbase);
}
return 0;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 522 |
--- initial
+++ final
@@ -1,24 +1,23 @@
static int garmin_attach(struct usb_serial *serial) {
int status = 0;
struct usb_serial_port *port = serial->port[0];
struct garmin_data *garmin_data_p = NULL;
dbg("%s", __FUNCTION__);
- garmin_data_p = kmalloc(sizeof(struct garmin_data), GFP_KERNEL);
+ garmin_data_p = kzalloc(sizeof(struct garmin_data), GFP_KERNEL);
if (garmin_data_p == NULL) {
dev_err(&port->dev, "%s - Out of memory\n", __FUNCTION__);
return -ENOMEM;
}
- memset(garmin_data_p, 0, sizeof(struct garmin_data));
init_timer(&garmin_data_p->timer);
spin_lock_init(&garmin_data_p->lock);
INIT_LIST_HEAD(&garmin_data_p->pktlist);
// garmin_data_p->timer.expires = jiffies + session_timeout;
garmin_data_p->timer.data = (unsigned long)garmin_data_p;
garmin_data_p->timer.function = timeout_handler;
garmin_data_p->port = port;
garmin_data_p->state = 0;
garmin_data_p->count = 0;
usb_set_serial_port_data(port, garmin_data_p);
status = garmin_init_session(port);
return status;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 545 |
--- initial
+++ final
@@ -1,16 +1,15 @@
static struct sdio_tx *alloc_tx_struct(struct tx_cxt *tx) {
struct sdio_tx *t = NULL;
- t = kmalloc(sizeof(*t), GFP_ATOMIC);
+ t = kzalloc(sizeof(*t), GFP_ATOMIC);
if (t == NULL) goto out;
- memset(t, 0, sizeof(*t));
t->buf = kmalloc(TX_BUF_SIZE, GFP_ATOMIC);
if (t->buf == NULL) goto out;
t->tx_cxt = tx;
return t;
out:
if (t) {
kfree(t->buf);
kfree(t);
}
return NULL;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 623 |
--- initial
+++ final
@@ -1,58 +1,57 @@
static int esp6_init_state(struct xfrm_state *x) {
struct esp_data *esp = NULL;
/* null auth and encryption can have zero length keys */
if (x->aalg) {
if (x->aalg->alg_key_len > 512) goto error;
}
if (x->ealg == NULL) goto error;
if (x->encap) goto error;
- esp = kmalloc(sizeof(*esp), GFP_KERNEL);
+ esp = kzalloc(sizeof(*esp), GFP_KERNEL);
if (esp == NULL) return -ENOMEM;
- memset(esp, 0, sizeof(*esp));
if (x->aalg) {
struct xfrm_algo_desc *aalg_desc;
esp->auth.key = x->aalg->alg_key;
esp->auth.key_len = (x->aalg->alg_key_len + 7) / 8;
esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
if (esp->auth.tfm == NULL) goto error;
esp->auth.icv = esp_hmac_digest;
aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
BUG_ON(!aalg_desc);
if (aalg_desc->uinfo.auth.icv_fullbits / 8 != crypto_tfm_alg_digestsize(esp->auth.tfm)) {
printk(KERN_INFO "ESP: %s digestsize %u != %hu\n", x->aalg->alg_name, crypto_tfm_alg_digestsize(esp->auth.tfm), aalg_desc->uinfo.auth.icv_fullbits / 8);
goto error;
}
esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits / 8;
esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits / 8;
esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
if (!esp->auth.work_icv) goto error;
}
esp->conf.key = x->ealg->alg_key;
esp->conf.key_len = (x->ealg->alg_key_len + 7) / 8;
if (x->props.ealgo == SADB_EALG_NULL)
esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
else
esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
if (esp->conf.tfm == NULL) goto error;
esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm);
esp->conf.padlen = 0;
if (esp->conf.ivlen) {
esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
if (unlikely(esp->conf.ivec == NULL)) goto error;
get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
}
if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len)) goto error;
x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
if (x->props.mode) x->props.header_len += sizeof(struct ipv6hdr);
x->data = esp;
return 0;
error:
x->data = esp;
esp6_destroy(x);
x->data = NULL;
return -EINVAL;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 532 |
--- initial
+++ final
@@ -1,17 +1,16 @@
static int scan_for_bad_eraseblocks(void) {
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0, ebcnt);
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i]) bad += 1;
cond_resched();
}
printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 583 |
--- initial
+++ final
@@ -1,32 +1,31 @@
static void subsys_create_adapter(struct hpi_message *phm, struct hpi_response *phr) {
/* create temp adapter obj, because we don't know what index yet */
struct hpi_adapter_obj ao;
u32 os_error_code;
u16 err;
HPI_DEBUG_LOG(DEBUG, " subsys_create_adapter\n");
memset(&ao, 0, sizeof(ao));
/* this HPI only creates adapters for TI/PCI devices */
if (phm->u.s.resource.bus_type != HPI_BUS_PCI) return;
if (phm->u.s.resource.r.pci->vendor_id != HPI_PCI_VENDOR_ID_TI) return;
if (phm->u.s.resource.r.pci->device_id != HPI_PCI_DEV_ID_DSP6205) return;
- ao.priv = kmalloc(sizeof(struct hpi_hw_obj), GFP_KERNEL);
+ ao.priv = kzalloc(sizeof(struct hpi_hw_obj), GFP_KERNEL);
if (!ao.priv) {
HPI_DEBUG_LOG(ERROR, "cant get mem for adapter object\n");
phr->error = HPI_ERROR_MEMORY_ALLOC;
return;
}
- memset(ao.priv, 0, sizeof(struct hpi_hw_obj));
ao.pci = *phm->u.s.resource.r.pci;
err = create_adapter_obj(&ao, &os_error_code);
if (!err) err = hpi_add_adapter(&ao);
if (err) {
phr->u.s.data = os_error_code;
delete_adapter_obj(&ao);
phr->error = err;
return;
}
phr->u.s.aw_adapter_list[ao.index] = ao.adapter_type;
phr->u.s.adapter_index = ao.index;
phr->u.s.num_adapters++;
phr->error = 0;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 569 |
--- initial
+++ final
@@ -1,17 +1,16 @@
struct fb_info *framebuffer_alloc(size_t size, struct device *dev) {
#define BYTES_PER_LONG (BITS_PER_LONG / 8)
#define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG))
int fb_info_size = sizeof(struct fb_info);
struct fb_info *info;
char *p;
if (size) fb_info_size += PADDING;
- p = kmalloc(fb_info_size + size, GFP_KERNEL);
+ p = kzalloc(fb_info_size + size, GFP_KERNEL);
if (!p) return NULL;
- memset(p, 0, fb_info_size + size);
info = (struct fb_info *)p;
if (size) info->par = p + fb_info_size;
info->device = dev;
return info;
#undef PADDING
#undef BYTES_PER_LONG
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 521 |
--- initial
+++ final
@@ -1,10 +1,9 @@
static int h4_open(struct hci_uart *hu) {
struct h4_struct *h4;
BT_DBG("hu %p", hu);
- h4 = kmalloc(sizeof(*h4), GFP_ATOMIC);
+ h4 = kzalloc(sizeof(*h4), GFP_ATOMIC);
if (!h4) return -ENOMEM;
- memset(h4, 0, sizeof(*h4));
skb_queue_head_init(&h4->txq);
hu->priv = h4;
return 0;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 515 |
--- initial
+++ final
@@ -1,16 +1,15 @@
static struct sdio_tx *alloc_tx_struct(struct tx_cxt *tx) {
struct sdio_tx *t = NULL;
- t = kmalloc(sizeof(*t), GFP_ATOMIC);
+ t = kzalloc(sizeof(*t), GFP_ATOMIC);
if (t == NULL) goto out;
- memset(t, 0, sizeof(*t));
t->buf = kmalloc(TX_BUF_SIZE, GFP_ATOMIC);
if (t->buf == NULL) goto out;
t->tx_cxt = tx;
return t;
out:
if (t) {
kfree(t->buf);
kfree(t);
}
return NULL;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 618 |
--- initial
+++ final
@@ -1,49 +1,46 @@
MYPROCOBJECT *visor_proc_CreateObject(MYPROCTYPE *type, const char *name, void *context) {
MYPROCOBJECT *obj = NULL, *rc = NULL;
int i = 0;
if (type == NULL) FAIL("type cannot be NULL", 0);
- obj = kmalloc(sizeof(MYPROCOBJECT), GFP_KERNEL | __GFP_NORETRY);
+ obj = kzalloc(sizeof(MYPROCOBJECT), GFP_KERNEL | __GFP_NORETRY);
if (obj == NULL) FAIL("out of memory", 0);
- memset(obj, 0, sizeof(MYPROCOBJECT));
obj->type = type;
obj->context = context;
if (name == NULL) {
obj->name = NULL;
obj->procDir = type->procDir;
} else {
obj->namesize = strlen(name) + 1;
obj->name = kmalloc(obj->namesize, GFP_KERNEL | __GFP_NORETRY);
if (obj->name == NULL) {
obj->namesize = 0;
FAIL("out of memory", 0);
}
strcpy(obj->name, name);
obj->procDir = createProcDir(obj->name, type->procDir);
if (obj->procDir == NULL) RETPTR(NULL);
}
- obj->procDirPropertyContexts = kmalloc((type->nProperties + 1) * sizeof(PROCDIRENTRYCONTEXT), GFP_KERNEL | __GFP_NORETRY);
+ obj->procDirPropertyContexts = kzalloc((type->nProperties + 1) * sizeof(PROCDIRENTRYCONTEXT), GFP_KERNEL | __GFP_NORETRY);
if (obj->procDirPropertyContexts == NULL) FAIL("out of memory", 0);
- memset(obj->procDirPropertyContexts, 0, (type->nProperties + 1) * sizeof(PROCDIRENTRYCONTEXT));
- obj->procDirProperties = kmalloc((type->nProperties + 1) * sizeof(struct proc_dir_entry *), GFP_KERNEL | __GFP_NORETRY);
+ obj->procDirProperties = kzalloc((type->nProperties + 1) * sizeof(struct proc_dir_entry *), GFP_KERNEL | __GFP_NORETRY);
if (obj->procDirProperties == NULL) FAIL("out of memory", 0);
- memset(obj->procDirProperties, 0, (type->nProperties + 1) * sizeof(struct proc_dir_entry *));
for (i = 0; i < type->nProperties; i++) {
obj->procDirPropertyContexts[i].procObject = obj;
obj->procDirPropertyContexts[i].propertyIndex = i;
obj->procDirPropertyContexts[i].show_property = type->show_property;
if (type->propertyNames[i][0] != '\0') {
/* only create properties that have names */
obj->procDirProperties[i] = createProcFile(type->propertyNames[i], obj->procDir, &proc_fops, &obj->procDirPropertyContexts[i]);
if (obj->procDirProperties[i] == NULL) RETPTR(NULL);
}
}
RETPTR(obj);
Away:
if (rc == NULL) {
if (obj != NULL) {
visor_proc_DestroyObject(obj);
obj = NULL;
}
}
return rc;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 644 |
--- initial
+++ final
@@ -1,76 +1,75 @@
static int ixp2000_flash_probe(struct platform_device *dev) {
static const char *probes[] = {"RedBoot", "cmdlinepart", NULL};
struct ixp2000_flash_data *ixp_data = dev->dev.platform_data;
struct flash_platform_data *plat;
struct ixp2000_flash_info *info;
unsigned long window_size;
int err = -1;
if (!ixp_data) return -ENODEV;
plat = ixp_data->platform_data;
if (!plat) return -ENODEV;
window_size = dev->resource->end - dev->resource->start + 1;
dev_info(&dev->dev, "Probe of IXP2000 flash(%d banks x %dMiB)\n", ixp_data->nr_banks, ((u32)window_size >> 20));
if (plat->width != 1) {
dev_err(&dev->dev, "IXP2000 MTD map only supports 8-bit mode, asking for %d\n", plat->width * 8);
return -EIO;
}
- info = kmalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
if (!info) {
err = -ENOMEM;
goto Error;
}
- memset(info, 0, sizeof(struct ixp2000_flash_info));
platform_set_drvdata(dev, info);
/*
* Tell the MTD layer we're not 1:1 mapped so that it does
* not attempt to do a direct access on us.
*/
info->map.phys = NO_XIP;
info->map.size = ixp_data->nr_banks * window_size;
info->map.bankwidth = 1;
/*
* map_priv_2 is used to store a ptr to the bank_setup routine
*/
info->map.map_priv_2 = (unsigned long)ixp_data->bank_setup;
info->map.name = dev_name(&dev->dev);
info->map.read = ixp2000_flash_read8;
info->map.write = ixp2000_flash_write8;
info->map.copy_from = ixp2000_flash_copy_from;
info->map.copy_to = ixp2000_flash_copy_to;
info->res = request_mem_region(dev->resource->start, dev->resource->end - dev->resource->start + 1, dev_name(&dev->dev));
if (!info->res) {
dev_err(&dev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto Error;
}
info->map.map_priv_1 = (unsigned long)ioremap(dev->resource->start, dev->resource->end - dev->resource->start + 1);
if (!info->map.map_priv_1) {
dev_err(&dev->dev, "Failed to ioremap flash region\n");
err = -EIO;
goto Error;
}
#if defined(__ARMEB__)
/*
* Enable erratum 44 workaround for NPUs with broken slowport
*/
erratum44_workaround = ixp2000_has_broken_slowport();
dev_info(&dev->dev, "Erratum 44 workaround %s\n", erratum44_workaround ? "enabled" : "disabled");
#endif
info->mtd = do_map_probe(plat->map_name, &info->map);
if (!info->mtd) {
dev_err(&dev->dev, "map_probe failed\n");
err = -ENXIO;
goto Error;
}
info->mtd->owner = THIS_MODULE;
err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
if (err > 0) {
err = add_mtd_partitions(info->mtd, info->partitions, err);
if (err) dev_err(&dev->dev, "Could not parse partitions\n");
}
if (err) goto Error;
return 0;
Error:
ixp2000_flash_remove(dev);
return err;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 581 |
--- initial
+++ final
@@ -1,71 +1,70 @@
static int matroxfb_probe(struct pci_dev *pdev, const struct pci_device_id *dummy) {
struct board *b;
u_int16_t svid;
u_int16_t sid;
struct matrox_fb_info *minfo;
int err;
u_int32_t cmd;
DBG(__func__)
svid = pdev->subsystem_vendor;
sid = pdev->subsystem_device;
for (b = dev_list; b->vendor; b++) {
if ((b->vendor != pdev->vendor) || (b->device != pdev->device) || (b->rev < pdev->revision)) continue;
if (b->svid)
if ((b->svid != svid) || (b->sid != sid)) continue;
break;
}
/* not match... */
if (!b->vendor) return -ENODEV;
if (dev > 0) {
/* not requested one... */
dev--;
return -ENODEV;
}
pci_read_config_dword(pdev, PCI_COMMAND, &cmd);
if (pci_enable_device(pdev)) { return -1; }
- minfo = kmalloc(sizeof(*minfo), GFP_KERNEL);
+ minfo = kzalloc(sizeof(*minfo), GFP_KERNEL);
if (!minfo) return -1;
- memset(minfo, 0, sizeof(*minfo));
minfo->pcidev = pdev;
minfo->dead = 0;
minfo->usecount = 0;
minfo->userusecount = 0;
pci_set_drvdata(pdev, minfo);
/* DEVFLAGS */
minfo->devflags.memtype = memtype;
if (memtype != -1) noinit = 0;
if (cmd & PCI_COMMAND_MEMORY) {
minfo->devflags.novga = novga;
minfo->devflags.nobios = nobios;
minfo->devflags.noinit = noinit;
/* subsequent heads always needs initialization and must not enable BIOS */
novga = 1;
nobios = 1;
noinit = 0;
} else {
minfo->devflags.novga = 1;
minfo->devflags.nobios = 1;
minfo->devflags.noinit = 0;
}
minfo->devflags.nopciretry = no_pci_retry;
minfo->devflags.mga_24bpp_fix = inv24;
minfo->devflags.precise_width = option_precise_width;
minfo->devflags.sgram = sgram;
minfo->capable.cross4MB = cross4MB;
spin_lock_init(&minfo->lock.DAC);
spin_lock_init(&minfo->lock.accel);
init_rwsem(&minfo->crtc2.lock);
init_rwsem(&minfo->altout.lock);
mutex_init(&minfo->fbcon.mm_lock);
minfo->irq_flags = 0;
init_waitqueue_head(&minfo->crtc1.vsync.wait);
init_waitqueue_head(&minfo->crtc2.vsync.wait);
minfo->crtc1.panpos = -1;
err = initMatrox2(minfo, b);
if (!err) {
matroxfb_register_device(minfo);
return 0;
}
kfree(minfo);
return -1;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 640 |
--- initial
+++ final
@@ -1,52 +1,51 @@
int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr) {
struct ifmcaddr6 *mc;
struct inet6_dev *idev;
idev = in6_dev_get(dev);
if (idev == NULL) return -EINVAL;
write_lock_bh(&idev->lock);
if (idev->dead) {
write_unlock_bh(&idev->lock);
in6_dev_put(idev);
return -ENODEV;
}
for (mc = idev->mc_list; mc; mc = mc->next) {
if (ipv6_addr_equal(&mc->mca_addr, addr)) {
mc->mca_users++;
write_unlock_bh(&idev->lock);
ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0, NULL, 0);
in6_dev_put(idev);
return 0;
}
}
/*
* not found: create a new one.
*/
- mc = kmalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);
+ mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);
if (mc == NULL) {
write_unlock_bh(&idev->lock);
in6_dev_put(idev);
return -ENOMEM;
}
- memset(mc, 0, sizeof(struct ifmcaddr6));
init_timer(&mc->mca_timer);
mc->mca_timer.function = igmp6_timer_handler;
mc->mca_timer.data = (unsigned long)mc;
ipv6_addr_copy(&mc->mca_addr, addr);
mc->idev = idev;
mc->mca_users = 1;
/* mca_stamp should be updated upon changes */
mc->mca_cstamp = mc->mca_tstamp = jiffies;
atomic_set(&mc->mca_refcnt, 2);
spin_lock_init(&mc->mca_lock);
/* initial mode is (EX, empty) */
mc->mca_sfmode = MCAST_EXCLUDE;
mc->mca_sfcount[MCAST_EXCLUDE] = 1;
if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) || IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL) mc->mca_flags |= MAF_NOREPORT;
mc->next = idev->mc_list;
idev->mc_list = mc;
write_unlock_bh(&idev->lock);
mld_del_delrec(idev, &mc->mca_addr);
igmp6_group_added(mc);
ma_put(mc);
return 0;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 539 |
--- initial
+++ final
@@ -1,37 +1,36 @@
struct cfcnfg *cfcnfg_create(void) {
struct cfcnfg *this;
struct cfctrl_rsp *resp;
/* Initiate this layer */
- this = kmalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
+ this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
if (!this) {
pr_warning("CAIF: %s(): Out of memory\n", __func__);
return NULL;
}
- memset(this, 0, sizeof(struct cfcnfg));
this->mux = cfmuxl_create();
if (!this->mux) goto out_of_mem;
this->ctrl = cfctrl_create();
if (!this->ctrl) goto out_of_mem;
/* Initiate response functions */
resp = cfctrl_get_respfuncs(this->ctrl);
resp->enum_rsp = cfctrl_enum_resp;
resp->linkerror_ind = cfctrl_resp_func;
resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp;
resp->sleep_rsp = cfctrl_resp_func;
resp->wake_rsp = cfctrl_resp_func;
resp->restart_rsp = cfctrl_resp_func;
resp->radioset_rsp = cfctrl_resp_func;
resp->linksetup_rsp = cfcnfg_linkup_rsp;
resp->reject_rsp = cfcnfg_reject_rsp;
this->last_phyid = 1;
cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
layer_set_dn(this->ctrl, this->mux);
layer_set_up(this->ctrl, this);
return this;
out_of_mem:
pr_warning("CAIF: %s(): Out of memory\n", __func__);
kfree(this->mux);
kfree(this->ctrl);
kfree(this);
return NULL;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 563 |
--- initial
+++ final
@@ -1,27 +1,26 @@
static struct usb_tx *alloc_tx_struct(int len) {
struct usb_tx *t = NULL;
int ret = 0;
- t = kmalloc(sizeof(struct usb_tx), GFP_ATOMIC);
+ t = kzalloc(sizeof(struct usb_tx), GFP_ATOMIC);
if (!t) {
ret = -ENOMEM;
goto out;
}
- memset(t, 0, sizeof(struct usb_tx));
t->urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!(len % 512)) len++;
t->buf = kmalloc(len, GFP_ATOMIC);
if (!t->urb || !t->buf) {
ret = -ENOMEM;
goto out;
}
out:
if (ret < 0) {
if (t) {
usb_free_urb(t->urb);
kfree(t->buf);
kfree(t);
}
return NULL;
}
return t;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 642 |
--- initial
+++ final
@@ -1,54 +1,53 @@
static struct ip6_flowlabel *fl_create(struct in6_flowlabel_req *freq, char __user *optval, int optlen, int *err_p) {
struct ip6_flowlabel *fl;
int olen;
int addr_type;
int err;
err = -ENOMEM;
- fl = kmalloc(sizeof(*fl), GFP_KERNEL);
+ fl = kzalloc(sizeof(*fl), GFP_KERNEL);
if (fl == NULL) goto done;
- memset(fl, 0, sizeof(*fl));
olen = optlen - CMSG_ALIGN(sizeof(*freq));
if (olen > 0) {
struct msghdr msg;
struct flowi flowi;
int junk;
err = -ENOMEM;
fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
if (fl->opt == NULL) goto done;
memset(fl->opt, 0, sizeof(*fl->opt));
fl->opt->tot_len = sizeof(*fl->opt) + olen;
err = -EFAULT;
if (copy_from_user(fl->opt + 1, optval + CMSG_ALIGN(sizeof(*freq)), olen)) goto done;
msg.msg_controllen = olen;
msg.msg_control = (void *)(fl->opt + 1);
flowi.oif = 0;
err = datagram_send_ctl(&msg, &flowi, fl->opt, &junk, &junk);
if (err) goto done;
err = -EINVAL;
if (fl->opt->opt_flen) goto done;
if (fl->opt->opt_nflen == 0) {
kfree(fl->opt);
fl->opt = NULL;
}
}
fl->expires = jiffies;
err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
if (err) goto done;
fl->share = freq->flr_share;
addr_type = ipv6_addr_type(&freq->flr_dst);
if ((addr_type & IPV6_ADDR_MAPPED) || addr_type == IPV6_ADDR_ANY) goto done;
ipv6_addr_copy(&fl->dst, &freq->flr_dst);
atomic_set(&fl->users, 1);
switch (fl->share) {
case IPV6_FL_S_EXCL:
case IPV6_FL_S_ANY: break;
case IPV6_FL_S_PROCESS: fl->owner = current->pid; break;
case IPV6_FL_S_USER: fl->owner = current->euid; break;
default: err = -EINVAL; goto done;
}
return fl;
done:
fl_free(fl);
*err_p = err;
return NULL;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 533 |
--- initial
+++ final
@@ -1,26 +1,25 @@
struct dvb_frontend *cx24116_attach(const struct cx24116_config *config, struct i2c_adapter *i2c) {
struct cx24116_state *state = NULL;
int ret;
dprintk("%s\n", __func__);
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct cx24116_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct cx24116_state), GFP_KERNEL);
if (state == NULL) goto error1;
/* setup the state */
- memset(state, 0, sizeof(struct cx24116_state));
state->config = config;
state->i2c = i2c;
/* check if the demod is present */
ret = (cx24116_readreg(state, 0xFF) << 8) | cx24116_readreg(state, 0xFE);
if (ret != 0x0501) {
printk(KERN_INFO "Invalid probe, probably not a CX24116 device\n");
goto error2;
}
/* create dvb_frontend */
memcpy(&state->frontend.ops, &cx24116_ops, sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
return &state->frontend;
error2:
kfree(state);
error1:
return NULL;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 558 |
--- initial
+++ final
@@ -1,47 +1,45 @@
int wl_iw_attach(struct net_device *dev, void *dhdp) {
int params_size;
wl_iw_t *iw;
#if defined(WL_IW_USE_ISCAN)
iscan_info_t *iscan = NULL;
if (!dev) return 0;
memset(&g_wl_iw_params, 0, sizeof(wl_iw_extra_params_t));
#ifdef CSCAN
params_size = (WL_SCAN_PARAMS_FIXED_SIZE + offsetof(wl_iscan_params_t, params)) + (WL_NUMCHANNELS * sizeof(u16)) + WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
#else
params_size = (WL_SCAN_PARAMS_FIXED_SIZE + offsetof(wl_iscan_params_t, params));
#endif
- iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL);
+ iscan = kzalloc(sizeof(iscan_info_t), GFP_KERNEL);
if (!iscan) return -ENOMEM;
- memset(iscan, 0, sizeof(iscan_info_t));
iscan->iscan_ex_params_p = kmalloc(params_size, GFP_KERNEL);
if (!iscan->iscan_ex_params_p) return -ENOMEM;
iscan->iscan_ex_param_size = params_size;
iscan->sysioc_tsk = NULL;
g_iscan = iscan;
iscan->dev = dev;
iscan->iscan_state = ISCAN_STATE_IDLE;
iscan->timer_ms = 3000;
init_timer(&iscan->timer);
iscan->timer.data = (unsigned long)iscan;
iscan->timer.function = wl_iw_timerfunc;
sema_init(&iscan->sysioc_sem, 0);
iscan->sysioc_tsk = kthread_run(_iscan_sysioc_thread, iscan, "_iscan_sysioc");
if (IS_ERR(iscan->sysioc_tsk)) {
iscan->sysioc_tsk = NULL;
return -ENOMEM;
}
#endif /* defined(WL_IW_USE_ISCAN) */
iw = *(wl_iw_t **)netdev_priv(dev);
iw->pub = (dhd_pub_t *)dhdp;
MUTEX_LOCK_INIT(iw->pub);
MUTEX_LOCK_WL_SCAN_SET_INIT();
#ifdef SOFTAP
priv_dev = dev;
MUTEX_LOCK_SOFTAP_SET_INIT(iw->pub);
#endif
- g_scan = kmalloc(G_SCAN_RESULTS, GFP_KERNEL);
+ g_scan = kzalloc(G_SCAN_RESULTS, GFP_KERNEL);
if (!g_scan) return -ENOMEM;
- memset(g_scan, 0, G_SCAN_RESULTS);
g_scan_specified_ssid = 0;
return 0;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 599 |
--- initial
+++ final
@@ -1,18 +1,17 @@
static struct usb_tx *alloc_tx_struct(struct tx_cxt *tx) {
struct usb_tx *t = NULL;
- t = kmalloc(sizeof(*t), GFP_ATOMIC);
+ t = kzalloc(sizeof(*t), GFP_ATOMIC);
if (t == NULL) goto out;
- memset(t, 0, sizeof(*t));
t->urb = usb_alloc_urb(0, GFP_ATOMIC);
t->buf = kmalloc(TX_BUF_SIZE, GFP_ATOMIC);
if (t->urb == NULL || t->buf == NULL) goto out;
t->tx_cxt = tx;
return t;
out:
if (t) {
usb_free_urb(t->urb);
kfree(t->buf);
kfree(t);
}
return NULL;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 625 |
--- initial
+++ final
@@ -1,17 +1,16 @@
static int bcsp_open(struct hci_uart *hu) {
struct bcsp_struct *bcsp;
BT_DBG("hu %p", hu);
- bcsp = kmalloc(sizeof(*bcsp), GFP_ATOMIC);
+ bcsp = kzalloc(sizeof(*bcsp), GFP_ATOMIC);
if (!bcsp) return -ENOMEM;
- memset(bcsp, 0, sizeof(*bcsp));
hu->priv = bcsp;
skb_queue_head_init(&bcsp->unack);
skb_queue_head_init(&bcsp->rel);
skb_queue_head_init(&bcsp->unrel);
init_timer(&bcsp->tbcsp);
bcsp->tbcsp.function = bcsp_timed_event;
bcsp->tbcsp.data = (u_long)hu;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
if (txcrc) bcsp->use_crc = 1;
return 0;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 514 |
--- initial
+++ final
@@ -1,15 +1,14 @@
static int drm_add_magic(struct drm_master *master, struct drm_file *priv, drm_magic_t magic) {
struct drm_magic_entry *entry;
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) return -ENOMEM;
- memset(entry, 0, sizeof(*entry));
entry->priv = priv;
entry->hash_item.key = (unsigned long)magic;
mutex_lock(&dev->struct_mutex);
drm_ht_insert_item(&master->magiclist, &entry->hash_item);
list_add_tail(&entry->head, &master->magicfree);
mutex_unlock(&dev->struct_mutex);
return 0;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 570 |
--- initial
+++ final
@@ -1,16 +1,15 @@
static int ip6fl_seq_open(struct inode *inode, struct file *file) {
struct seq_file *seq;
int rc = -ENOMEM;
- struct ip6fl_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct ip6fl_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s) goto out;
rc = seq_open(file, &ip6fl_seq_ops);
if (rc) goto out_kfree;
seq = file->private_data;
seq->private = s;
- memset(s, 0, sizeof(*s));
out:
return rc;
out_kfree:
kfree(s);
goto out;
}<sep>@@
expression e1,e2,r;
statement S;
identifier e3,print1,print2,print3;
constant char[] c1;
constant char[] c2;
constant char[] c3;
type T;
@@
T e3 =
- kmalloc
+ kzalloc
(e1, e2);
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
(
r = e3;
|
dev_set_drvdata(r,e3);
)
<... when != e3
when != r
(
print3(...,c3,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 534 |
--- initial
+++ final
@@ -1,140 +1,139 @@
dhd_pub_t *dhd_attach(struct dhd_bus *bus, uint bus_hdrlen) {
dhd_info_t *dhd = NULL;
struct net_device *net;
DHD_TRACE(("%s: Enter\n", __func__));
/* updates firmware nvram path if it was provided as module
paramters */
if ((firmware_path != NULL) && (firmware_path[0] != '\0')) strcpy(fw_path, firmware_path);
if ((nvram_path != NULL) && (nvram_path[0] != '\0')) strcpy(nv_path, nvram_path);
/* Allocate etherdev, including space for private structure */
net = alloc_etherdev(sizeof(dhd));
if (!net) {
DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
goto fail;
}
/* Allocate primary dhd_info */
- dhd = kmalloc(sizeof(dhd_info_t), GFP_ATOMIC);
+ dhd = kzalloc(sizeof(dhd_info_t), GFP_ATOMIC);
if (!dhd) {
DHD_ERROR(("%s: OOM - alloc dhd_info\n", __func__));
goto fail;
}
- memset(dhd, 0, sizeof(dhd_info_t));
/*
* Save the dhd_info into the priv
*/
memcpy(netdev_priv(net), &dhd, sizeof(dhd));
/* Set network interface name if it was provided as module parameter */
if (iface_name[0]) {
int len;
char ch;
strncpy(net->name, iface_name, IFNAMSIZ);
net->name[IFNAMSIZ - 1] = 0;
len = strlen(net->name);
ch = net->name[len - 1];
if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2)) strcat(net->name, "%d");
}
if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) == DHD_BAD_IF) goto fail;
net->netdev_ops = NULL;
sema_init(&dhd->proto_sem, 1);
/* Initialize other structure content */
init_waitqueue_head(&dhd->ioctl_resp_wait);
init_waitqueue_head(&dhd->ctrl_wait);
/* Initialize the spinlocks */
spin_lock_init(&dhd->sdlock);
spin_lock_init(&dhd->txqlock);
/* Link to info module */
dhd->pub.info = dhd;
/* Link to bus module */
dhd->pub.bus = bus;
dhd->pub.hdrlen = bus_hdrlen;
/* Attach and link in the protocol */
if (dhd_prot_attach(&dhd->pub) != 0) {
DHD_ERROR(("dhd_prot_attach failed\n"));
goto fail;
}
#if defined(CONFIG_WIRELESS_EXT)
/* Attach and link in the iw */
if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
DHD_ERROR(("wl_iw_attach failed\n"));
goto fail;
}
#endif /* defined(CONFIG_WIRELESS_EXT) */
/* Attach and link in the cfg80211 */
if (IS_CFG80211_FAVORITE()) {
if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
DHD_ERROR(("wl_cfg80211_attach failed\n"));
goto fail;
}
if (!NO_FW_REQ()) {
strcpy(fw_path, wl_cfg80211_get_fwname());
strcpy(nv_path, wl_cfg80211_get_nvramname());
}
wl_cfg80211_dbg_level(DBG_CFG80211_GET());
}
/* Set up the watchdog timer */
init_timer(&dhd->timer);
dhd->timer.data = (unsigned long)dhd;
dhd->timer.function = dhd_watchdog;
/* Initialize thread based operation and lock */
sema_init(&dhd->sdsem, 1);
if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0))
dhd->threads_only = true;
else
dhd->threads_only = false;
if (dhd_dpc_prio >= 0) {
/* Initialize watchdog thread */
sema_init(&dhd->watchdog_sem, 0);
dhd->watchdog_tsk = kthread_run(dhd_watchdog_thread, dhd, "dhd_watchdog");
if (IS_ERR(dhd->watchdog_tsk)) {
printk(KERN_WARNING "dhd_watchdog thread failed to start\n");
dhd->watchdog_tsk = NULL;
}
} else {
dhd->watchdog_tsk = NULL;
}
/* Set up the bottom half handler */
if (dhd_dpc_prio >= 0) {
/* Initialize DPC thread */
sema_init(&dhd->dpc_sem, 0);
dhd->dpc_tsk = kthread_run(dhd_dpc_thread, dhd, "dhd_dpc");
if (IS_ERR(dhd->dpc_tsk)) {
printk(KERN_WARNING "dhd_dpc thread failed to start\n");
dhd->dpc_tsk = NULL;
}
} else {
tasklet_init(&dhd->tasklet, dhd_dpc, (unsigned long)dhd);
dhd->dpc_tsk = NULL;
}
if (dhd_sysioc) {
sema_init(&dhd->sysioc_sem, 0);
dhd->sysioc_tsk = kthread_run(_dhd_sysioc_thread, dhd, "_dhd_sysioc");
if (IS_ERR(dhd->sysioc_tsk)) {
printk(KERN_WARNING "_dhd_sysioc thread failed to start\n");
dhd->sysioc_tsk = NULL;
}
} else
dhd->sysioc_tsk = NULL;
/*
* Save the dhd_info into the priv
*/
memcpy(netdev_priv(net), &dhd, sizeof(dhd));
#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
g_bus = bus;
#endif
#if defined(CONFIG_PM_SLEEP)
register_pm_notifier(&dhd_sleep_pm_notifier);
#endif /* defined(CONFIG_PM_SLEEP) */
/* && defined(DHD_GPL) */
/* Init lock suspend to prevent kernel going to suspend */
#ifdef CONFIG_HAS_EARLYSUSPEND
dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
dhd->early_suspend.suspend = dhd_early_suspend;
dhd->early_suspend.resume = dhd_late_resume;
register_early_suspend(&dhd->early_suspend);
#endif
return &dhd->pub;
fail:
if (net) free_netdev(net);
if (dhd) dhd_detach(&dhd->pub);
return NULL;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 596 |
--- initial
+++ final
@@ -1,161 +1,160 @@
static int __init pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id) {
struct net_device *dev;
struct dev_priv *priv;
struct dev_info *hw_priv;
struct ksz_hw *hw;
struct platform_info *info;
struct ksz_port *port;
unsigned long reg_base;
unsigned long reg_len;
int cnt;
int i;
int mib_port_count;
int pi;
int port_count;
int result;
char banner[sizeof(version)];
struct ksz_switch *sw = NULL;
result = pci_enable_device(pdev);
if (result) return result;
result = -ENODEV;
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) return result;
reg_base = pci_resource_start(pdev, 0);
reg_len = pci_resource_len(pdev, 0);
if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) return result;
if (!request_mem_region(reg_base, reg_len, DRV_NAME)) return result;
pci_set_master(pdev);
result = -ENOMEM;
info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
if (!info) goto pcidev_init_dev_err;
hw_priv = &info->dev_info;
hw_priv->pdev = pdev;
hw = &hw_priv->hw;
hw->io = ioremap(reg_base, reg_len);
if (!hw->io) goto pcidev_init_io_err;
cnt = hw_init(hw);
if (!cnt) {
if (msg_enable & NETIF_MSG_PROBE) pr_alert("chip not detected\n");
result = -ENODEV;
goto pcidev_init_alloc_err;
}
snprintf(banner, sizeof(banner), "%s", version);
banner[13] = cnt + '0'; /* Replace x in "Micrel KSZ884x" */
dev_info(&hw_priv->pdev->dev, "%s\n", banner);
dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
/* Assume device is KSZ8841. */
hw->dev_count = 1;
port_count = 1;
mib_port_count = 1;
hw->addr_list_size = 0;
hw->mib_cnt = PORT_COUNTER_NUM;
hw->mib_port_cnt = 1;
/* KSZ8842 has a switch with multiple ports. */
if (2 == cnt) {
if (fast_aging) hw->overrides |= FAST_AGING;
hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;
/* Multiple network device interfaces are required. */
if (multi_dev) {
hw->dev_count = SWITCH_PORT_NUM;
hw->addr_list_size = SWITCH_PORT_NUM - 1;
}
/* Single network device has multiple ports. */
if (1 == hw->dev_count) {
port_count = SWITCH_PORT_NUM;
mib_port_count = SWITCH_PORT_NUM;
}
hw->mib_port_cnt = TOTAL_PORT_NUM;
- hw->ksz_switch = kmalloc(sizeof(struct ksz_switch), GFP_KERNEL);
+ hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
if (!hw->ksz_switch) goto pcidev_init_alloc_err;
- memset(hw->ksz_switch, 0, sizeof(struct ksz_switch));
sw = hw->ksz_switch;
}
for (i = 0; i < hw->mib_port_cnt; i++)
hw->port_mib[i].mib_start = 0;
hw->parent = hw_priv;
/* Default MTU is 1500. */
hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;
if (ksz_alloc_mem(hw_priv)) goto pcidev_init_mem_err;
hw_priv->hw.id = net_device_present;
spin_lock_init(&hw_priv->hwlock);
mutex_init(&hw_priv->lock);
/* tasklet is enabled. */
tasklet_init(&hw_priv->rx_tasklet, rx_proc_task, (unsigned long)hw_priv);
tasklet_init(&hw_priv->tx_tasklet, tx_proc_task, (unsigned long)hw_priv);
/* tasklet_enable will decrement the atomic counter. */
tasklet_disable(&hw_priv->rx_tasklet);
tasklet_disable(&hw_priv->tx_tasklet);
for (i = 0; i < TOTAL_PORT_NUM; i++)
init_waitqueue_head(&hw_priv->counter[i].counter);
if (macaddr[0] != ':') get_mac_addr(hw_priv, macaddr, MAIN_PORT);
/* Read MAC address and initialize override address if not overrided. */
hw_read_addr(hw);
/* Multiple device interfaces mode requires a second MAC address. */
if (hw->dev_count > 1) {
memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
read_other_addr(hw);
if (mac1addr[0] != ':') get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
}
hw_setup(hw);
if (hw->ksz_switch)
sw_setup(hw);
else {
hw_priv->wol_support = WOL_SUPPORT;
hw_priv->wol_enable = 0;
}
INIT_WORK(&hw_priv->mib_read, mib_read_work);
/* 500 ms timeout */
ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000, mib_monitor, hw_priv);
for (i = 0; i < hw->dev_count; i++) {
dev = alloc_etherdev(sizeof(struct dev_priv));
if (!dev) goto pcidev_init_reg_err;
info->netdev[i] = dev;
priv = netdev_priv(dev);
priv->adapter = hw_priv;
priv->id = net_device_present++;
port = &priv->port;
port->port_cnt = port_count;
port->mib_port_cnt = mib_port_count;
port->first_port = i;
port->flow_ctrl = PHY_FLOW_CTRL;
port->hw = hw;
port->linked = &hw->port_info[port->first_port];
for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
hw->port_info[pi].port_id = pi;
hw->port_info[pi].pdev = dev;
hw->port_info[pi].state = media_disconnected;
}
dev->mem_start = (unsigned long)hw->io;
dev->mem_end = dev->mem_start + reg_len - 1;
dev->irq = pdev->irq;
if (MAIN_PORT == i)
memcpy(dev->dev_addr, hw_priv->hw.override_addr, MAC_ADDR_LEN);
else {
memcpy(dev->dev_addr, sw->other_addr, MAC_ADDR_LEN);
if (!memcmp(sw->other_addr, hw->override_addr, MAC_ADDR_LEN)) dev->dev_addr[5] += port->first_port;
}
dev->netdev_ops = &netdev_ops;
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
if (register_netdev(dev)) goto pcidev_init_reg_err;
port_set_power_saving(port, true);
}
pci_dev_get(hw_priv->pdev);
pci_set_drvdata(pdev, info);
return 0;
pcidev_init_reg_err:
for (i = 0; i < hw->dev_count; i++) {
if (info->netdev[i]) {
netdev_free(info->netdev[i]);
info->netdev[i] = NULL;
}
}
pcidev_init_mem_err:
ksz_free_mem(hw_priv);
kfree(hw->ksz_switch);
pcidev_init_alloc_err:
iounmap(hw->io);
pcidev_init_io_err:
kfree(info);
pcidev_init_dev_err:
release_mem_region(reg_base, reg_len);
return result;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 567 |
--- initial
+++ final
@@ -1,15 +1,14 @@
static void gw_node_add(struct bat_priv *bat_priv, struct orig_node *orig_node, uint8_t new_gwflags) {
struct gw_node *gw_node;
int down, up;
- gw_node = kmalloc(sizeof(struct gw_node), GFP_ATOMIC);
+ gw_node = kzalloc(sizeof(struct gw_node), GFP_ATOMIC);
if (!gw_node) return;
- memset(gw_node, 0, sizeof(struct gw_node));
INIT_HLIST_NODE(&gw_node->list);
gw_node->orig_node = orig_node;
atomic_set(&gw_node->refcount, 1);
spin_lock_bh(&bat_priv->gw_list_lock);
hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
spin_unlock_bh(&bat_priv->gw_list_lock);
gw_bandwidth_to_kbit(new_gwflags, &down, &up);
bat_dbg(DBG_BATMAN, bat_priv, "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n", orig_node->orig, new_gwflags, (down > 2048 ? down / 1024 : down), (down > 2048 ? "MBit" : "KBit"), (up > 2048 ? up / 1024 : up), (up > 2048 ? "MBit" : "KBit"));
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 604 |
--- initial
+++ final
@@ -1,11 +1,10 @@
static struct sdio_rx *alloc_rx_struct(struct rx_cxt *rx) {
struct sdio_rx *r = NULL;
- r = kmalloc(sizeof(*r), GFP_ATOMIC);
+ r = kzalloc(sizeof(*r), GFP_ATOMIC);
if (r == NULL) goto out;
- memset(r, 0, sizeof(*r));
r->rx_cxt = rx;
return r;
out:
kfree(r);
return NULL;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 617 |
--- initial
+++ final
@@ -1,34 +1,33 @@
static dev_link_t *btuart_attach(void) {
btuart_info_t *info;
client_reg_t client_reg;
dev_link_t *link;
int ret;
/* Create new info device */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) return NULL;
- memset(info, 0, sizeof(*info));
link = &info->link;
link->priv = info;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->irq.Handler = btuart_interrupt;
link->irq.Instance = info;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
/* Register with Card Services */
link->next = dev_list;
dev_list = link;
client_reg.dev_info = &dev_info;
client_reg.Version = 0x0210;
client_reg.event_callback_args.client_data = link;
ret = pcmcia_register_client(&link->handle, &client_reg);
if (ret != CS_SUCCESS) {
cs_error(link->handle, RegisterClient, ret);
btuart_detach(link);
return NULL;
}
return link;
}<sep>@@
expression e1,e2,e3;
identifier print1,print2;
constant char[] c1;
constant char[] c2;
statement S;
@@
- e3 = kmalloc(e1, e2)
+ e3 = kzalloc(e1, e2)
<... when != e3
(
print1(...,c1,...,e3,...);
|
kfree(e3);
)
...>
if (e3 == NULL || ...) S
<... when != e3
(
print2(...,c2,...,e3,...);
|
kfree(e3);
)
...>
- memset(e3, 0, e1);
<|end_of_text|> | 512 |
--- initial
+++ final
@@ -1,122 +1,122 @@
int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, const struct fscrypt_name *nm, const struct inode *inode, int deletion, int xent) {
int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
int last_reference = !!(deletion && inode->i_nlink == 0);
struct ubifs_inode *ui = ubifs_inode(inode);
struct ubifs_inode *host_ui = ubifs_inode(dir);
struct ubifs_dent_node *dent;
struct ubifs_ino_node *ino;
union ubifs_key dent_key, ino_key;
- ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
+ ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));
dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
ilen = UBIFS_INO_NODE_SZ;
/*
* If the last reference to the inode is being deleted, then there is
* no need to attach and write inode data, it is being deleted anyway.
* And if the inode is being deleted, no need to synchronize
* write-buffer even if the inode is synchronous.
*/
if (!last_reference) {
ilen += ui->data_len;
sync |= IS_SYNC(inode);
}
aligned_dlen = ALIGN(dlen, 8);
aligned_ilen = ALIGN(ilen, 8);
len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
/* Make sure to also account for extended attributes */
len += host_ui->data_len;
dent = kzalloc(len, GFP_NOFS);
if (!dent) return -ENOMEM;
/* Make reservation before allocating sequence numbers */
err = make_reservation(c, BASEHD, len);
if (err) goto out_free;
if (!xent) {
dent->ch.node_type = UBIFS_DENT_NODE;
if (nm->hash)
dent_key_init_hash(c, &dent_key, dir->i_ino, nm->hash);
else
dent_key_init(c, &dent_key, dir->i_ino, nm);
} else {
dent->ch.node_type = UBIFS_XENT_NODE;
xent_key_init(c, &dent_key, dir->i_ino, nm);
}
key_write(c, &dent_key, dent->key);
dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
dent->type = get_dent_type(inode->i_mode);
dent->nlen = cpu_to_le16(fname_len(nm));
memcpy(dent->name, fname_name(nm), fname_len(nm));
dent->name[fname_len(nm)] = '\0';
set_dent_cookie(c, dent);
zero_dent_node_unused(dent);
ubifs_prep_grp_node(c, dent, dlen, 0);
ino = (void *)dent + aligned_dlen;
pack_inode(c, ino, inode, 0);
ino = (void *)ino + aligned_ilen;
pack_inode(c, ino, dir, 1);
if (last_reference) {
err = ubifs_add_orphan(c, inode->i_ino);
if (err) {
release_head(c, BASEHD);
goto out_finish;
}
ui->del_cmtno = c->cmt_no;
}
err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
if (err) goto out_release;
if (!sync) {
struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino);
}
release_head(c, BASEHD);
kfree(dent);
if (deletion) {
if (nm->hash)
err = ubifs_tnc_remove_dh(c, &dent_key, nm->minor_hash);
else
err = ubifs_tnc_remove_nm(c, &dent_key, nm);
if (err) goto out_ro;
err = ubifs_add_dirt(c, lnum, dlen);
} else
err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, nm);
if (err) goto out_ro;
/*
* Note, we do not remove the inode from TNC even if the last reference
* to it has just been deleted, because the inode may still be opened.
* Instead, the inode has been added to orphan lists and the orphan
* subsystem will take further care about it.
*/
ino_key_init(c, &ino_key, inode->i_ino);
ino_offs = dent_offs + aligned_dlen;
err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen);
if (err) goto out_ro;
ino_key_init(c, &ino_key, dir->i_ino);
ino_offs += aligned_ilen;
err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, UBIFS_INO_NODE_SZ + host_ui->data_len);
if (err) goto out_ro;
finish_reservation(c);
spin_lock(&ui->ui_lock);
ui->synced_i_size = ui->ui_size;
spin_unlock(&ui->ui_lock);
if (xent) {
spin_lock(&host_ui->ui_lock);
host_ui->synced_i_size = host_ui->ui_size;
spin_unlock(&host_ui->ui_lock);
}
mark_inode_clean(c, ui);
mark_inode_clean(c, host_ui);
return 0;
out_finish:
finish_reservation(c);
out_free:
kfree(dent);
return err;
out_release:
release_head(c, BASEHD);
kfree(dent);
out_ro:
ubifs_ro_mode(c, err);
if (last_reference) ubifs_delete_orphan(c, inode->i_ino);
finish_reservation(c);
return err;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,770 |
--- initial
+++ final
@@ -1,51 +1,51 @@
static int write_orph_node(struct ubifs_info *c, int atomic) {
struct ubifs_orphan *orphan, *cnext;
struct ubifs_orph_node *orph;
int gap, err, len, cnt, i;
- ubifs_assert(c->cmt_orphans > 0);
+ ubifs_assert(c, c->cmt_orphans > 0);
gap = c->leb_size - c->ohead_offs;
if (gap < UBIFS_ORPH_NODE_SZ + sizeof(__le64)) {
c->ohead_lnum += 1;
c->ohead_offs = 0;
gap = c->leb_size;
if (c->ohead_lnum > c->orph_last) {
/*
* We limit the number of orphans so that this should
* never happen.
*/
ubifs_err(c, "out of space in orphan area");
return -EINVAL;
}
}
cnt = (gap - UBIFS_ORPH_NODE_SZ) / sizeof(__le64);
if (cnt > c->cmt_orphans) cnt = c->cmt_orphans;
len = UBIFS_ORPH_NODE_SZ + cnt * sizeof(__le64);
- ubifs_assert(c->orph_buf);
+ ubifs_assert(c, c->orph_buf);
orph = c->orph_buf;
orph->ch.node_type = UBIFS_ORPH_NODE;
spin_lock(&c->orphan_lock);
cnext = c->orph_cnext;
for (i = 0; i < cnt; i++) {
orphan = cnext;
- ubifs_assert(orphan->cmt);
+ ubifs_assert(c, orphan->cmt);
orph->inos[i] = cpu_to_le64(orphan->inum);
orphan->cmt = 0;
cnext = orphan->cnext;
orphan->cnext = NULL;
}
c->orph_cnext = cnext;
c->cmt_orphans -= cnt;
spin_unlock(&c->orphan_lock);
if (c->cmt_orphans)
orph->cmt_no = cpu_to_le64(c->cmt_no);
else
/* Mark the last node of the commit */
orph->cmt_no = cpu_to_le64((c->cmt_no) | (1ULL << 63));
- ubifs_assert(c->ohead_offs + len <= c->leb_size);
- ubifs_assert(c->ohead_lnum >= c->orph_first);
- ubifs_assert(c->ohead_lnum <= c->orph_last);
+ ubifs_assert(c, c->ohead_offs + len <= c->leb_size);
+ ubifs_assert(c, c->ohead_lnum >= c->orph_first);
+ ubifs_assert(c, c->ohead_lnum <= c->orph_last);
err = do_write_orph_node(c, len, atomic);
c->ohead_offs += ALIGN(len, c->min_io_size);
c->ohead_offs = ALIGN(c->ohead_offs, 8);
return err;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,822 |
--- initial
+++ final
@@ -1,42 +1,42 @@
int ubifs_bg_wbufs_sync(struct ubifs_info *c) {
int err, i;
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (!c->need_wbuf_sync) return 0;
c->need_wbuf_sync = 0;
if (c->ro_error) {
err = -EROFS;
goto out_timers;
}
dbg_io("synchronize");
for (i = 0; i < c->jhead_cnt; i++) {
struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;
cond_resched();
/*
* If the mutex is locked then wbuf is being changed, so
* synchronization is not necessary.
*/
if (mutex_is_locked(&wbuf->io_mutex)) continue;
mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
if (!wbuf->need_sync) {
mutex_unlock(&wbuf->io_mutex);
continue;
}
err = ubifs_wbuf_sync_nolock(wbuf);
mutex_unlock(&wbuf->io_mutex);
if (err) {
ubifs_err(c, "cannot sync write-buffer, error %d", err);
ubifs_ro_mode(c, err);
goto out_timers;
}
}
return 0;
out_timers:
/* Cancel all timers to prevent repeated errors */
for (i = 0; i < c->jhead_cnt; i++) {
struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;
mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
cancel_wbuf_timer_nolock(wbuf);
mutex_unlock(&wbuf->io_mutex);
}
return err;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,752 |
--- initial
+++ final
@@ -1,90 +1,90 @@
int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp, int min_space, int pick_free) {
int err = 0, sum, exclude_index = pick_free == 2 ? 1 : 0;
const struct ubifs_lprops *lp = NULL, *idx_lp = NULL;
struct ubifs_lpt_heap *heap, *idx_heap;
ubifs_get_lprops(c);
if (pick_free) {
int lebs, rsvd_idx_lebs = 0;
spin_lock(&c->space_lock);
lebs = c->lst.empty_lebs + c->idx_gc_cnt;
lebs += c->freeable_cnt - c->lst.taken_empty_lebs;
/*
* Note, the index may consume more LEBs than have been reserved
* for it. It is OK because it might be consolidated by GC.
* But if the index takes fewer LEBs than it is reserved for it,
* this function must avoid picking those reserved LEBs.
*/
if (c->bi.min_idx_lebs >= c->lst.idx_lebs) {
rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs;
exclude_index = 1;
}
spin_unlock(&c->space_lock);
/* Check if there are enough free LEBs for the index */
if (rsvd_idx_lebs < lebs) {
/* OK, try to find an empty LEB */
lp = ubifs_fast_find_empty(c);
if (lp) goto found;
/* Or a freeable LEB */
lp = ubifs_fast_find_freeable(c);
if (lp) goto found;
} else
/*
* We cannot pick free/freeable LEBs in the below code.
*/
pick_free = 0;
} else {
spin_lock(&c->space_lock);
exclude_index = (c->bi.min_idx_lebs >= c->lst.idx_lebs);
spin_unlock(&c->space_lock);
}
/* Look on the dirty and dirty index heaps */
heap = &c->lpt_heap[LPROPS_DIRTY - 1];
idx_heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1];
if (idx_heap->cnt && !exclude_index) {
idx_lp = idx_heap->arr[0];
sum = idx_lp->free + idx_lp->dirty;
/*
* Since we reserve thrice as much space for the index than it
* actually takes, it does not make sense to pick indexing LEBs
* with less than, say, half LEB of dirty space. May be half is
* not the optimal boundary - this should be tested and
* checked. This boundary should determine how much we use
* in-the-gaps to consolidate the index comparing to how much
* we use garbage collector to consolidate it. The "half"
* criteria just feels to be fine.
*/
if (sum < min_space || sum < c->half_leb_size) idx_lp = NULL;
}
if (heap->cnt) {
lp = heap->arr[0];
if (lp->dirty + lp->free < min_space) lp = NULL;
}
/* Pick the LEB with most space */
if (idx_lp && lp) {
if (idx_lp->free + idx_lp->dirty >= lp->free + lp->dirty) lp = idx_lp;
} else if (idx_lp && !lp)
lp = idx_lp;
if (lp) {
- ubifs_assert(lp->free + lp->dirty >= c->dead_wm);
+ ubifs_assert(c, lp->free + lp->dirty >= c->dead_wm);
goto found;
}
/* Did not find a dirty LEB on the dirty heaps, have to scan */
dbg_find("scanning LPT for a dirty LEB");
lp = scan_for_dirty(c, min_space, pick_free, exclude_index);
if (IS_ERR(lp)) {
err = PTR_ERR(lp);
goto out;
}
- ubifs_assert(lp->dirty >= c->dead_wm || (pick_free && lp->free + lp->dirty == c->leb_size));
+ ubifs_assert(c, lp->dirty >= c->dead_wm || (pick_free && lp->free + lp->dirty == c->leb_size));
found:
dbg_find("found LEB %d, free %d, dirty %d, flags %#x", lp->lnum, lp->free, lp->dirty, lp->flags);
lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC, lp->flags | LPROPS_TAKEN, 0);
if (IS_ERR(lp)) {
err = PTR_ERR(lp);
goto out;
}
memcpy(ret_lp, lp, sizeof(struct ubifs_lprops));
out:
ubifs_release_lprops(c);
return err;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,744 |
--- initial
+++ final
@@ -1,74 +1,74 @@
int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host, const struct inode *inode, const struct fscrypt_name *nm) {
int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen;
struct ubifs_dent_node *xent;
struct ubifs_ino_node *ino;
union ubifs_key xent_key, key1, key2;
int sync = IS_DIRSYNC(host);
struct ubifs_inode *host_ui = ubifs_inode(host);
- ubifs_assert(inode->i_nlink == 0);
- ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
+ ubifs_assert(c, inode->i_nlink == 0);
+ ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));
/*
* Since we are deleting the inode, we do not bother to attach any data
* to it and assume its length is %UBIFS_INO_NODE_SZ.
*/
xlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
aligned_xlen = ALIGN(xlen, 8);
hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);
xent = kzalloc(len, GFP_NOFS);
if (!xent) return -ENOMEM;
/* Make reservation before allocating sequence numbers */
err = make_reservation(c, BASEHD, len);
if (err) {
kfree(xent);
return err;
}
xent->ch.node_type = UBIFS_XENT_NODE;
xent_key_init(c, &xent_key, host->i_ino, nm);
key_write(c, &xent_key, xent->key);
xent->inum = 0;
xent->type = get_dent_type(inode->i_mode);
xent->nlen = cpu_to_le16(fname_len(nm));
memcpy(xent->name, fname_name(nm), fname_len(nm));
xent->name[fname_len(nm)] = '\0';
zero_dent_node_unused(xent);
ubifs_prep_grp_node(c, xent, xlen, 0);
ino = (void *)xent + aligned_xlen;
pack_inode(c, ino, inode, 0);
ino = (void *)ino + UBIFS_INO_NODE_SZ;
pack_inode(c, ino, host, 1);
err = write_head(c, BASEHD, xent, len, &lnum, &xent_offs, sync);
if (!sync && !err) ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
release_head(c, BASEHD);
kfree(xent);
if (err) goto out_ro;
/* Remove the extended attribute entry from TNC */
err = ubifs_tnc_remove_nm(c, &xent_key, nm);
if (err) goto out_ro;
err = ubifs_add_dirt(c, lnum, xlen);
if (err) goto out_ro;
/*
* Remove all nodes belonging to the extended attribute inode from TNC.
* Well, there actually must be only one node - the inode itself.
*/
lowest_ino_key(c, &key1, inode->i_ino);
highest_ino_key(c, &key2, inode->i_ino);
err = ubifs_tnc_remove_range(c, &key1, &key2);
if (err) goto out_ro;
err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
if (err) goto out_ro;
/* And update TNC with the new host inode position */
ino_key_init(c, &key1, host->i_ino);
err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen);
if (err) goto out_ro;
finish_reservation(c);
spin_lock(&host_ui->ui_lock);
host_ui->synced_i_size = host_ui->ui_size;
spin_unlock(&host_ui->ui_lock);
mark_inode_clean(c, host_ui);
return 0;
out_ro:
ubifs_ro_mode(c, err);
finish_reservation(c);
return err;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,767 |
--- initial
+++ final
@@ -1,51 +1,51 @@
static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx, struct ubifs_znode *znode, int lnum, int offs, int len) {
struct ubifs_znode *zp;
int i, err;
/* Make index node */
idx->ch.node_type = UBIFS_IDX_NODE;
idx->child_cnt = cpu_to_le16(znode->child_cnt);
idx->level = cpu_to_le16(znode->level);
for (i = 0; i < znode->child_cnt; i++) {
struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
struct ubifs_zbranch *zbr = &znode->zbranch[i];
key_write_idx(c, &zbr->key, &br->key);
br->lnum = cpu_to_le32(zbr->lnum);
br->offs = cpu_to_le32(zbr->offs);
br->len = cpu_to_le32(zbr->len);
if (!zbr->lnum || !zbr->len) {
ubifs_err(c, "bad ref in znode");
ubifs_dump_znode(c, znode);
if (zbr->znode) ubifs_dump_znode(c, zbr->znode);
return -EINVAL;
}
}
ubifs_prepare_node(c, idx, len, 0);
znode->lnum = lnum;
znode->offs = offs;
znode->len = len;
err = insert_old_idx_znode(c, znode);
/* Update the parent */
zp = znode->parent;
if (zp) {
struct ubifs_zbranch *zbr;
zbr = &zp->zbranch[znode->iip];
zbr->lnum = lnum;
zbr->offs = offs;
zbr->len = len;
} else {
c->zroot.lnum = lnum;
c->zroot.offs = offs;
c->zroot.len = len;
}
c->calc_idx_sz += ALIGN(len, 8);
atomic_long_dec(&c->dirty_zn_cnt);
- ubifs_assert(ubifs_zn_dirty(znode));
- ubifs_assert(ubifs_zn_cow(znode));
+ ubifs_assert(c, ubifs_zn_dirty(znode));
+ ubifs_assert(c, ubifs_zn_cow(znode));
/*
* Note, unlike 'write_index()' we do not add memory barriers here
* because this function is called with @c->tnc_mutex locked.
*/
__clear_bit(DIRTY_ZNODE, &znode->flags);
__clear_bit(COW_ZNODE, &znode->flags);
return err;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,845 |
--- initial
+++ final
@@ -1,15 +1,15 @@
int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len) {
int err;
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (c->ro_error) return -EROFS;
if (!dbg_is_tst_rcvry(c))
err = ubi_leb_change(c->ubi, lnum, buf, len);
else
err = dbg_leb_change(c, lnum, buf, len);
if (err) {
ubifs_err(c, "changing %d bytes in LEB %d failed, error %d", len, lnum, err);
ubifs_ro_mode(c, err);
dump_stack();
}
return err;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,754 |