// DSL_DEV_WinHost_Message_t m;
if (mei_arc_swap_buff == NULL) {
- mei_arc_swap_buff =
- (u32 *) kmalloc (MAXSWAPSIZE * 4, GFP_KERNEL);
+ mei_arc_swap_buff = kmalloc (MAXSWAPSIZE * 4, GFP_KERNEL);
if (mei_arc_swap_buff == NULL) {
IFX_MEI_EMSG (">>> malloc fail for codeswap buff!!! <<<\n");
return DSL_DEV_MEI_ERR_FAILURE;
u32 size; // Size of binary image in bytes
u32 checksum; // Checksum for image
u32 count; // Count of swp pages in image
- ARC_SWP_PAGE_HDR page[1]; // Should be "count" pages - '1' to make compiler happy
+ ARC_SWP_PAGE_HDR page[]; // "count" page headers follow (flexible array member)
} ARC_IMG_HDR;
typedef struct smmu_mem_info {
{
volatile u32 *dest;
- if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0
- || data_src == 0 || ((unsigned long)data_src & 0x03) != 0 )
+ if (!code_src || ((unsigned long)code_src & 0x03) != 0
+ || !data_src || ((unsigned long)data_src & 0x03) != 0 )
return -1;
if ( code_dword_len <= CDM_CODE_MEMORYn_DWLEN(0) )
{
volatile u32 *dest;
- if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0
- || data_src == 0 || ((unsigned long)data_src & 0x03) != 0 )
+ if (!code_src || ((unsigned long)code_src & 0x03) != 0
+ || !data_src || ((unsigned long)data_src & 0x03) != 0 )
return -1;
if ( code_dword_len <= CDM_CODE_MEMORYn_DWLEN(0) )
{
volatile u32 *dest;
- if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0
- || data_src == 0 || ((unsigned long)data_src & 0x03) != 0 )
+ if (!code_src || ((unsigned long)code_src & 0x03) != 0
+ || !data_src || ((unsigned long)data_src & 0x03) != 0 )
return -1;
if ( code_dword_len <= CDM_CODE_MEMORYn_DWLEN(0) )
unsigned int clr, set;
volatile u32 *dest;
- if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0
- || data_src == 0 || ((unsigned long)data_src & 0x03) != 0 )
+ if (!code_src || ((unsigned long)code_src & 0x03) != 0
+ || !data_src || ((unsigned long)data_src & 0x03) != 0 )
return -1;
clr = pp32 ? 0xF0 : 0x0F;
}
}
- if ( g_atm_priv_data.tx_skb_base != NULL )
- kfree(g_atm_priv_data.tx_skb_base);
-
- if ( g_atm_priv_data.tx_desc_base != NULL )
- kfree(g_atm_priv_data.tx_desc_base);
-
- if ( g_atm_priv_data.oam_buf_base != NULL )
- kfree(g_atm_priv_data.oam_buf_base);
-
- if ( g_atm_priv_data.oam_desc_base != NULL )
- kfree(g_atm_priv_data.oam_desc_base);
+ kfree(g_atm_priv_data.tx_skb_base);
+ kfree(g_atm_priv_data.tx_desc_base);
+ kfree(g_atm_priv_data.oam_buf_base);
+ kfree(g_atm_priv_data.oam_desc_base);
if ( g_atm_priv_data.aal_desc_base != NULL ) {
for ( i = 0; i < dma_rx_descriptor_length; i++ ) {
dev_kfree_skb_any(skb);
}
}
- kfree(g_atm_priv_data.aal_desc_base);
}
+
+ kfree(g_atm_priv_data.aal_desc_base);
}
static inline void init_rx_tables(void)
aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));
- };
+ }
i = 0;
{
struct aes_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->temp = kzalloc(AES_BLOCK_SIZE * AES_CBCMAC_DBN_TEMP_SIZE, GFP_KERNEL);
- if (IS_ERR(mctx->temp)) return PTR_ERR(mctx->temp);
+ if (!mctx->temp) return -ENOMEM;
return 0;
}
aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));
- };
+ }
/* Prepare Rx buf length used in dma psuedo interrupt */
while (aes->controlr.BUS) {
// wait for AES to be ready
- };
+ }
deu_priv->outcopy = (u32 *) DEU_DWORD_REORDERING(out_arg, aes_buff_out, BUFFER_OUT, nbytes);
deu_priv->event_src = AES_ASYNC_EVENT;
u32 remain, inc, nbytes = areq->nbytes;
u32 chunk_bytes = src->length;
-
- aes_con = (struct aes_container *)kmalloc(sizeof(struct aes_container),
- GFP_KERNEL);
-
+ aes_con = kmalloc(sizeof(struct aes_container), GFP_KERNEL);
if (!(aes_con)) {
printk("Cannot allocate memory for AES container, fn %s, ln %d\n",
__func__, __LINE__);
if (mode > 0) {
des->IVHR = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
des->IVLR = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
- };
+ }
/* memory alignment issue */
dword_mem_aligned_in = (u32 *) DEU_DWORD_REORDERING(in_arg, des_buff_in, BUFFER_IN, nbytes);
dma->controlr.EN = 1;
while (des->controlr.BUS) {
- };
+ }
wlen = dma_device_write (dma_device, (u8 *) dword_mem_aligned_in, nbytes, NULL);
if (wlen != nbytes) {
if (mode > 0) {
*(u32 *) iv_arg = DEU_ENDIAN_SWAP(des->IVHR);
*((u32 *) iv_arg + 1) = DEU_ENDIAN_SWAP(des->IVLR);
- };
+ }
CRTCL_SECT_END;
u32 remain, inc, nbytes = areq->nbytes;
u32 chunk_bytes = src->length;
- des_con = (struct des_container *)kmalloc(sizeof(struct des_container),
- GFP_KERNEL);
-
+ des_con = kmalloc(sizeof(struct des_container), GFP_KERNEL);
if (!(des_con)) {
printk("Cannot allocate memory for AES container, fn %s, ln %d\n",
__func__, __LINE__);
if (mode > 0) {
des->IVHR = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
des->IVLR = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
- };
+ }
nblocks = nbytes / 4;
if (mode > 0) {
*(u32 *) iv_arg = DEU_ENDIAN_SWAP(des->IVHR);
*((u32 *) iv_arg + 1) = DEU_ENDIAN_SWAP(des->IVLR);
- };
+ }
CRTCL_SECT_END;
}
volatile struct aes_t *aes = (volatile struct aes_t *) AES_START; \
for (i = 0; i < 10; i++) \
udelay(DELAY_PERIOD); \
- while (dma->controlr.BSY) {}; \
- while (aes->controlr.BUS) {}; \
+ while (dma->controlr.BSY) {} \
+ while (aes->controlr.BUS) {} \
} while (0)
#define WAIT_DES_DMA_READY() \
volatile struct des_t *des = (struct des_t *) DES_3DES_START; \
for (i = 0; i < 10; i++) \
udelay(DELAY_PERIOD); \
- while (dma->controlr.BSY) {}; \
- while (des->controlr.BUS) {}; \
+ while (dma->controlr.BSY) {} \
+ while (des->controlr.BUS) {} \
} while (0)
#define AES_DMA_MISC_CONFIG() \
volatile struct aes_t *aes = (volatile struct aes_t *) AES_START; \
for (i = 0; i < 10; i++) \
udelay(DELAY_PERIOD); \
- while (dma->controlr.BSY) {}; \
- while (aes->controlr.BUS) {}; \
+ while (dma->controlr.BSY) {} \
+ while (aes->controlr.BUS) {} \
} while (0)
#define WAIT_DES_DMA_READY() \
volatile struct des_t *des = (struct des_t *) DES_3DES_START; \
for (i = 0; i < 10; i++) \
udelay(DELAY_PERIOD); \
- while (dma->controlr.BSY) {}; \
- while (des->controlr.BUS) {}; \
+ while (dma->controlr.BSY) {} \
+ while (des->controlr.BUS) {} \
} while (0)
#define SHA_HASH_INIT \
volatile struct aes_t *aes = (volatile struct aes_t *) AES_START; \
for (i = 0; i < 10; i++) \
udelay(DELAY_PERIOD); \
- while (dma->controlr.BSY) {}; \
- while (aes->controlr.BUS) {}; \
+ while (dma->controlr.BSY) {} \
+ while (aes->controlr.BUS) {} \
} while (0)
#define WAIT_DES_DMA_READY() \
volatile struct des_t *des = (struct des_t *) DES_3DES_START; \
for (i = 0; i < 10; i++) \
udelay(DELAY_PERIOD); \
- while (dma->controlr.BSY) {}; \
- while (des->controlr.BUS) {}; \
+ while (dma->controlr.BSY) {} \
+ while (des->controlr.BUS) {} \
} while (0)
#define AES_DMA_MISC_CONFIG() \
for (i = 0; i < 16; i++) {
hashs->MR = in[i];
// printk("in[%d]: %08x\n", i, in[i]);
- };
+ }
//wait for processing
while (hashs->controlr.BSY) {
{
for (i = 0; i < 16; i++) {
hashs->MR = in[i];
- };
+ }
hashs->controlr.GO = 1;
asm("sync");
{
struct md5_hmac_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->temp = kzalloc(4 * MD5_HMAC_DBN_TEMP_SIZE, GFP_KERNEL);
- if (IS_ERR(mctx->temp)) return PTR_ERR(mctx->temp);
+ if (!mctx->temp) return -ENOMEM;
mctx->desc = kzalloc(sizeof(struct shash_desc), GFP_KERNEL);
- if (IS_ERR(mctx->desc)) return PTR_ERR(mctx->desc);
+ if (!mctx->desc) return -ENOMEM;
return 0;
}
for (i = 0; i < 16; i++) {
hashs->MR = in[i];
- };
+ }
//wait for processing
while (hashs->controlr.BSY) {
{
for (i = 0; i < 16; i++) {
hashs->MR = in[i];
- };
+ }
hashs->controlr.GO = 1;
asm("sync");
{
struct sha1_hmac_ctx *sctx = crypto_tfm_ctx(tfm);
sctx->temp = kzalloc(4 * SHA1_HMAC_DBN_TEMP_SIZE, GFP_KERNEL);
- if (IS_ERR(sctx->temp)) return PTR_ERR(sctx->temp);
+ if (!sctx->temp) return -ENOMEM;
sctx->desc = kzalloc(sizeof(struct shash_desc), GFP_KERNEL);
- if (IS_ERR(sctx->desc)) return PTR_ERR(sctx->desc);
+ if (!sctx->desc) return -ENOMEM;
return 0;
}
}
}
- if ( g_ptm_priv_data.rx_desc_base != NULL )
- kfree(g_ptm_priv_data.rx_desc_base);
-
- if ( g_ptm_priv_data.tx_desc_base != NULL )
- kfree(g_ptm_priv_data.tx_desc_base);
-
- if ( g_ptm_priv_data.tx_skb_base != NULL )
- kfree(g_ptm_priv_data.tx_skb_base);
+ kfree(g_ptm_priv_data.rx_desc_base);
+ kfree(g_ptm_priv_data.tx_desc_base);
+ kfree(g_ptm_priv_data.tx_skb_base);
}
static INLINE void init_tables(void)
{
volatile u32 *dest;
- if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0
- || data_src == 0 || ((unsigned long)data_src & 0x03) != 0 )
+ if (!code_src || ((unsigned long)code_src & 0x03) != 0
+ || !data_src || ((unsigned long)data_src & 0x03) != 0 )
return -1;
if ( code_dword_len <= CDM_CODE_MEMORYn_DWLEN(0) )
{
volatile u32 *dest;
- if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0
- || data_src == 0 || ((unsigned long)data_src & 0x03) != 0 )
+ if (!code_src || ((unsigned long)code_src & 0x03) != 0
+ || !data_src || ((unsigned long)data_src & 0x03) != 0 )
return -1;
if ( code_dword_len <= CDM_CODE_MEMORYn_DWLEN(0) )
{
volatile u32 *dest;
- if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0
- || data_src == 0 || ((unsigned long)data_src & 0x03) != 0 )
+ if (!code_src || ((unsigned long)code_src & 0x03) != 0
+ || !data_src || ((unsigned long)data_src & 0x03) != 0 )
return -1;
if ( code_dword_len <= CDM_CODE_MEMORYn_DWLEN(0) )
unsigned int clr, set;
volatile u32 *dest;
- if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0
- || data_src == 0 || ((unsigned long)data_src & 0x03) != 0 )
+ if (!code_src || ((unsigned long)code_src & 0x03) != 0
+ || !data_src || ((unsigned long)data_src & 0x03) != 0 )
return -1;
clr = pp32 ? 0xF0 : 0x0F;
if (sclhi() < 0) { /* timed out */
sdahi(); /* we don't want to block the net */
return -ETIMEDOUT;
- };
+ }
scllo();
}
sdahi();
if (sclhi() < 0) {
return -ETIMEDOUT;
- };
+ }
/* read ack: SDA should be pulled down by slave */
ack = getsda() == 0; /* ack: sda is pulled low ->success. */
scllo();
for (i = 0; i < 8; i++) {
if (sclhi() < 0) {
return -ETIMEDOUT;
- };
+ }
indata *= 2;
if (getsda())
indata |= 0x01;
putc(tmp[i]);
while (size-- > 0)
- putc(' ');;
+ putc(' ');
return 1;
}
AT91PS_DataFlash pFlash = &DataFlashInst;
pFlash = AT91F_DataflashSelect (pFlash, &AddrToRead);
- if (pFlash == 0)
+ if (!pFlash)
return -1;
return (AT91F_DataFlashRead(pFlash, AddrToRead, size, result));
}
nfc->irq = platform_get_irq(pdev, 0);
- if (nfc->irq < 0) {
- dev_err(&pdev->dev, "no IRQ resource specified\n");
+ if (nfc->irq < 0)
return -EINVAL;
- }
init_waitqueue_head(&nfc->irq_waitq);
ret = devm_request_irq(&pdev->dev, nfc->irq, ar934x_nfc_irq_handler,
{
int mi = 1;
int i;
- int symbol = 0;
#ifdef _LZMA_LOC_OPT
RC_INIT_VAR
#endif
#ifdef _LZMA_LOC_OPT
RC_FLUSH_VAR
#endif
- return symbol;
+ return 0;
}
Byte LzmaLiteralDecode(CProb *probs, CRangeDecoder *rd)
if (bmtd.mtd != mtd)
return;
- if (bmtd.debugfs_dir)
- debugfs_remove_recursive(bmtd.debugfs_dir);
+ debugfs_remove_recursive(bmtd.debugfs_dir);
bmtd.debugfs_dir = NULL;
kfree(bmtd.bbt_buf);
next_block:
ba--;
- };
+ }
return false;
}
return true;
}
}
- };
+ }
return false;
}
mode == PHY_INTERFACE_MODE_REVMII) {
b53_read8(dev, B53_CTRL_PAGE,
B53_PORT_OVERRIDE_CTRL, &po);
- if (!(po & PORT_OVERRIDE_RV_MII_25))
- pr_err("Failed to enable reverse MII mode\n");
- return -EINVAL;
+ if (!(po & PORT_OVERRIDE_RV_MII_25)) {
+ pr_err("Failed to enable reverse MII mode\n");
+ of_node_put(dn);
+ return -EINVAL;
+ }
}
} else {
po |= GMII_PO_EN;
if (!(port->flags & BIT(SWITCH_PORT_FLAG_TAGGED))) {
vlan->untag |= BIT(port->id);
priv->ports[port->id].pvid = val->port_vlan;
- };
+ }
}
/* ignore disabled ports */
platform_set_drvdata(pdev, NULL);
rtl8366_smi_cleanup(smi);
err_free_smi:
- if (smi->emu_vlanmc)
- kfree(smi->emu_vlanmc);
+ kfree(smi->emu_vlanmc);
kfree(smi);
return err;
}
ret = -ENODATA;
goto fail;
}
- };
+ }
templen -= (u8 *)needle - tempbuf;
/* Past magic. Look for tag node */
{
rtk_uint32 retVal, counter=0;
rtk_uint8 controlByte_W, controlByte_R;
- rtk_uint8 slaveRegAddr_L, slaveRegAddr_H = 0x0, temp;
+ rtk_uint8 slaveRegAddr_L, slaveRegAddr_H = 0x0;
rtk_uint8 regData_L, regData_H;
/* control byte :deviceAddress + W, deviceAddress + R */
slaveRegAddr_H = (rtk_uint8) (slaveRegAddr >>8) ;
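+ /* Byte order of the 16-bit register address depends on the configured I2C mode. */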
if( rtk_i2c_mode == I2C_70B_LSB_16BIT_MODE)
- {
- temp = slaveRegAddr_L ;
- slaveRegAddr_L = slaveRegAddr_H;
- slaveRegAddr_H = temp;
- }
+ swap(slaveRegAddr_L, slaveRegAddr_H);
/*check bus state: idle*/
{
rtk_uint32 retVal,counter;
rtk_uint8 controlByte_W;
- rtk_uint8 slaveRegAddr_L, slaveRegAddr_H = 0x0, temp;
+ rtk_uint8 slaveRegAddr_L, slaveRegAddr_H = 0x0;
rtk_uint8 regData_L, regData_H;
/* control byte :deviceAddress + W */
regData_L = (rtk_uint8) (regData & 0x00FF);
if( rtk_i2c_mode == I2C_70B_LSB_16BIT_MODE)
- {
- temp = slaveRegAddr_L ;
- slaveRegAddr_L = slaveRegAddr_H;
- slaveRegAddr_H = temp;
- }
+ swap(slaveRegAddr_L, slaveRegAddr_H);
/*check bus state: idle*/
break;
default:
break;
- };
+ }
return RT_ERR_OK;
break;
default:
break;
- };
+ }
return RT_ERR_OK;
int rt3050_esw_init(struct fe_priv *priv)
{
struct device_node *np = priv->switch_np;
- struct platform_device *pdev = of_find_device_by_node(np);
+ struct platform_device *pdev;
struct switch_dev *swdev;
struct rt305x_esw *esw;
const __be32 *rgmii;
if (!of_device_is_compatible(np, ralink_esw_match->compatible))
return -EINVAL;
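+ /* of_find_device_by_node() takes a reference on the returned device;
+  * error paths below balance it with put_device().
+  */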
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return -ENODEV;
esw = platform_get_drvdata(pdev);
- if (!esw)
+ if (!esw) {
+ put_device(&pdev->dev);
return -EPROBE_DEFER;
+ }
priv->soc->swpriv = esw;
esw->priv = priv;
dev_err(&pdev->dev, "RGMII mode, not exporting switch device.\n");
unregister_switch(&esw->swdev);
platform_set_drvdata(pdev, NULL);
+ put_device(&pdev->dev);
return -ENODEV;
}
struct device_node *eth_node = priv->dev->of_node;
struct device_node *phy_node, *mdiobus_node;
struct device_node *np = priv->switch_np;
- struct platform_device *pdev = of_find_device_by_node(np);
+ struct platform_device *pdev;
struct mt7620_gsw *gsw;
const __be32 *id;
int ret;
u8 val;
- if (!pdev)
- return -ENODEV;
-
if (!of_device_is_compatible(np, mediatek_gsw_match->compatible))
return -EINVAL;
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return -ENODEV;
+
gsw = platform_get_drvdata(pdev);
priv->soc->swpriv = gsw;
ret = devm_request_irq(&pdev->dev, gsw->irq, gsw_interrupt_mt7620, 0,
"gsw", priv);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq");
+ put_device(&pdev->dev);
return ret;
}
mtk_switch_w32(gsw, ~PORT_IRQ_ST_CHG, GSW_REG_IMR);
}
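+ /* Drop the reference taken by of_find_device_by_node() now that the switch is set up. */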
+ put_device(&pdev->dev);
return 0;
}
return;
mdiobus_unregister(priv->mii_bus);
- of_node_put(priv->mii_bus->dev.of_node);
+ put_device(&priv->mii_bus->dev);
kfree(priv->mii_bus);
}
netdev->base_addr = (unsigned long)fe_base;
netdev->irq = platform_get_irq(pdev, 0);
- if (netdev->irq < 0) {
- dev_err(&pdev->dev, "no IRQ resource found\n");
+ if (netdev->irq < 0)
return -ENXIO;
- }
priv = netdev_priv(netdev);
spin_lock_init(&priv->page_lock);
return -ENXIO;
rtcl_ccu = kzalloc(sizeof(*rtcl_ccu), GFP_KERNEL);
- if (IS_ERR(rtcl_ccu))
+ if (!rtcl_ccu)
return -ENOMEM;
rtcl_ccu->np = np;
rtcl_ccu->sram.pmark = (int *)((void *)sram_pbase + (dram_size - 4));
rtcl_ccu->sram.vbase = sram_vbase;
+ put_device(&pdev->dev);
return 0;
err_put_device:
sprintf(led_set_str, "led_set%d", led_set);
priv->ports[pn].leds_on_this_port = of_property_count_u32_elems(led_node, led_set_str);
if (priv->ports[pn].leds_on_this_port > 4) {
+ of_node_put(dn);
dev_err(priv->dev, "led_set %d for port %d configuration is invalid\n", led_set, pn);
return -ENODEV;
}
/* Obtain device IRQ number */
dev->irq = platform_get_irq(pdev, 0);
- if (dev->irq < 0) {
- dev_err(&pdev->dev, "cannot obtain network-device IRQ\n");
- return err;
- }
+ if (dev->irq < 0)
+ return dev->irq;
err = devm_request_irq(&pdev->dev, dev->irq, priv->r->net_irq,
IRQF_SHARED, dev->name, dev);