text (string, lengths 213-7.14k) | idx (int64, 16-12.5k)
---|---|
--- initial
+++ final
@@ -1,24 +1,24 @@
static struct ubifs_pnode *dirty_cow_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode) {
struct ubifs_pnode *p;
if (!test_bit(COW_CNODE, &pnode->flags)) {
/* pnode is not being committed */
if (!test_and_set_bit(DIRTY_CNODE, &pnode->flags)) {
c->dirty_pn_cnt += 1;
add_pnode_dirt(c, pnode);
}
return pnode;
}
/* pnode is being committed, so copy it */
p = kmemdup(pnode, sizeof(struct ubifs_pnode), GFP_NOFS);
if (unlikely(!p)) return ERR_PTR(-ENOMEM);
p->cnext = NULL;
__set_bit(DIRTY_CNODE, &p->flags);
__clear_bit(COW_CNODE, &p->flags);
replace_cats(c, pnode, p);
- ubifs_assert(!test_bit(OBSOLETE_CNODE, &pnode->flags));
+ ubifs_assert(c, !test_bit(OBSOLETE_CNODE, &pnode->flags));
__set_bit(OBSOLETE_CNODE, &pnode->flags);
c->dirty_pn_cnt += 1;
add_pnode_dirt(c, pnode);
pnode->parent->nbranch[p->iip].pnode = p;
return p;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,805 |
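This first patch threads the struct ubifs_info pointer into every ubifs_assert() call site. The point of the extra argument is easiest to see in a sketch: once the macro knows which filesystem instance tripped the assertion, it can react per instance rather than only printing. The body below is an illustrative assumption, not the kernel's exact definition; ubifs_ro_mode() is the same helper the I/O records further down call on write errors.

#define ubifs_assert(c, expr) do {                                        \
        if (unlikely(!(expr))) {                                          \
                pr_crit("UBIFS assert failed in %s at line %d\n",         \
                        __func__, __LINE__);                              \
                ubifs_ro_mode((c), -EINVAL); /* assumed policy: go read-only */ \
        }                                                                 \
} while (0)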
--- initial
+++ final
@@ -1,13 +1,13 @@
static int check_lpt_crc(const struct ubifs_info *c, void *buf, int len) {
int pos = 0;
uint8_t *addr = buf;
uint16_t crc, calc_crc;
- crc = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_CRC_BITS);
+ crc = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_CRC_BITS);
calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES, len - UBIFS_LPT_CRC_BYTES);
if (crc != calc_crc) {
ubifs_err(c, "invalid crc in LPT node: crc %hx calc %hx", crc, calc_crc);
dump_stack();
return -EINVAL;
}
return 0;
}<sep>@@
identifier f,c;
expression e1,e2,e3;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_unpack_bits(
+ c,
e1,e2,e3)
...>
}
<|end_of_text|> | 11,791 |
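The same threading is applied to ubifs_unpack_bits(), which gains a leading const struct ubifs_info * argument (the const qualifier is visible from the check_lpt_crc() context above). The before/after prototypes below are inferred from the call sites in these diffs; the exact return type is an assumption.

/* Inferred prototypes (sketch): */
uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits);           /* before */
uint32_t ubifs_unpack_bits(const struct ubifs_info *c, uint8_t **addr,
                           int *pos, int nrbits);                           /* after */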
--- initial
+++ final
@@ -1,17 +1,17 @@
static const struct ubifs_lprops *scan_for_leb_for_idx(struct ubifs_info *c) {
const struct ubifs_lprops *lprops;
struct scan_data data;
int err;
data.lnum = -1;
err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum, (ubifs_lpt_scan_callback)scan_for_idx_cb, &data);
if (err) return ERR_PTR(err);
- ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt);
+ ubifs_assert(c, data.lnum >= c->main_first && data.lnum < c->leb_cnt);
c->lscan_lnum = data.lnum;
lprops = ubifs_lpt_lookup_dirty(c, data.lnum);
if (IS_ERR(lprops)) return lprops;
- ubifs_assert(lprops->lnum == data.lnum);
- ubifs_assert(lprops->free + lprops->dirty == c->leb_size);
- ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
- ubifs_assert(!(lprops->flags & LPROPS_INDEX));
+ ubifs_assert(c, lprops->lnum == data.lnum);
+ ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size);
+ ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert(c, !(lprops->flags & LPROPS_INDEX));
return lprops;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,743 |
--- initial
+++ final
@@ -1,72 +1,72 @@
static int populate_page(struct ubifs_info *c, struct page *page, struct bu_info *bu, int *n) {
int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
struct inode *inode = page->mapping->host;
loff_t i_size = i_size_read(inode);
unsigned int page_block;
void *addr, *zaddr;
pgoff_t end_index;
dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx", inode->i_ino, page->index, i_size, page->flags);
addr = zaddr = kmap(page);
end_index = (i_size - 1) >> PAGE_SHIFT;
if (!i_size || page->index > end_index) {
hole = 1;
memset(addr, 0, PAGE_SIZE);
goto out_hole;
}
page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
while (1) {
int err, len, out_len, dlen;
if (nn >= bu->cnt) {
hole = 1;
memset(addr, 0, UBIFS_BLOCK_SIZE);
} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
struct ubifs_data_node *dn;
dn = bu->buf + (bu->zbranch[nn].offs - offs);
- ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum);
+ ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum);
len = le32_to_cpu(dn->size);
if (len <= 0 || len > UBIFS_BLOCK_SIZE) goto out_err;
dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
out_len = UBIFS_BLOCK_SIZE;
if (ubifs_crypt_is_encrypted(inode)) {
err = ubifs_decrypt(inode, dn, &dlen, page_block);
if (err) goto out_err;
}
err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len, le16_to_cpu(dn->compr_type));
if (err || len != out_len) goto out_err;
if (len < UBIFS_BLOCK_SIZE) memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);
nn += 1;
read = (i << UBIFS_BLOCK_SHIFT) + len;
} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
nn += 1;
continue;
} else {
hole = 1;
memset(addr, 0, UBIFS_BLOCK_SIZE);
}
if (++i >= UBIFS_BLOCKS_PER_PAGE) break;
addr += UBIFS_BLOCK_SIZE;
page_block += 1;
}
if (end_index == page->index) {
int len = i_size & (PAGE_SIZE - 1);
if (len && len < read) memset(zaddr + len, 0, read - len);
}
out_hole:
if (hole) {
SetPageChecked(page);
dbg_gen("hole");
}
SetPageUptodate(page);
ClearPageError(page);
flush_dcache_page(page);
kunmap(page);
*n = nn;
return 0;
out_err:
ClearPageUptodate(page);
SetPageError(page);
flush_dcache_page(page);
kunmap(page);
ubifs_err(c, "bad data node (block %u, inode %lu)", page_block, inode->i_ino);
return -EINVAL;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,737 |
--- initial
+++ final
@@ -1,126 +1,126 @@
static int ubifs_remount_rw(struct ubifs_info *c) {
int err, lnum;
if (c->rw_incompat) {
ubifs_err(c, "the file-system is not R/W-compatible");
ubifs_msg(c, "on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d", c->fmt_version, c->ro_compat_version, UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION);
return -EROFS;
}
mutex_lock(&c->umount_mutex);
dbg_save_space_info(c);
c->remounting_rw = 1;
c->ro_mount = 0;
if (c->space_fixup) {
err = ubifs_fixup_free_space(c);
if (err) goto out;
}
err = check_free_space(c);
if (err) goto out;
if (c->old_leb_cnt != c->leb_cnt) {
struct ubifs_sb_node *sup;
sup = ubifs_read_sb_node(c);
if (IS_ERR(sup)) {
err = PTR_ERR(sup);
goto out;
}
sup->leb_cnt = cpu_to_le32(c->leb_cnt);
err = ubifs_write_sb_node(c, sup);
kfree(sup);
if (err) goto out;
}
if (c->need_recovery) {
ubifs_msg(c, "completing deferred recovery");
err = ubifs_write_rcvrd_mst_node(c);
if (err) goto out;
err = ubifs_recover_size(c);
if (err) goto out;
err = ubifs_clean_lebs(c, c->sbuf);
if (err) goto out;
err = ubifs_recover_inl_heads(c, c->sbuf);
if (err) goto out;
} else {
/* A readonly mount is not allowed to have orphans */
- ubifs_assert(c->tot_orphans == 0);
+ ubifs_assert(c, c->tot_orphans == 0);
err = ubifs_clear_orphans(c);
if (err) goto out;
}
if (!(c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY))) {
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
err = ubifs_write_master(c);
if (err) goto out;
}
c->ileb_buf = vmalloc(c->leb_size);
if (!c->ileb_buf) {
err = -ENOMEM;
goto out;
}
c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ + UBIFS_CIPHER_BLOCK_SIZE, GFP_KERNEL);
if (!c->write_reserve_buf) {
err = -ENOMEM;
goto out;
}
err = ubifs_lpt_init(c, 0, 1);
if (err) goto out;
/* Create background thread */
c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name);
if (IS_ERR(c->bgt)) {
err = PTR_ERR(c->bgt);
c->bgt = NULL;
ubifs_err(c, "cannot spawn \"%s\", error %d", c->bgt_name, err);
goto out;
}
wake_up_process(c->bgt);
c->orph_buf = vmalloc(c->leb_size);
if (!c->orph_buf) {
err = -ENOMEM;
goto out;
}
/* Check for enough log space */
lnum = c->lhead_lnum + 1;
if (lnum >= UBIFS_LOG_LNUM + c->log_lebs) lnum = UBIFS_LOG_LNUM;
if (lnum == c->ltail_lnum) {
err = ubifs_consolidate_log(c);
if (err) goto out;
}
if (c->need_recovery)
err = ubifs_rcvry_gc_commit(c);
else
err = ubifs_leb_unmap(c, c->gc_lnum);
if (err) goto out;
dbg_gen("re-mounted read-write");
c->remounting_rw = 0;
if (c->need_recovery) {
c->need_recovery = 0;
ubifs_msg(c, "deferred recovery completed");
} else {
/*
* Do not run the debugging space check if we were doing
* recovery, because when we saved the information we had the
* file-system in a state where the TNC and lprops have been
* modified in memory, but all the I/O operations (including a
* commit) were deferred. So the file-system was in
* "non-committed" state. Now the file-system is in committed
* state, and of course the amount of free space will change
* because, for example, the old index size was imprecise.
*/
err = dbg_check_space_info(c);
}
mutex_unlock(&c->umount_mutex);
return err;
out:
c->ro_mount = 1;
vfree(c->orph_buf);
c->orph_buf = NULL;
if (c->bgt) {
kthread_stop(c->bgt);
c->bgt = NULL;
}
free_wbufs(c);
kfree(c->write_reserve_buf);
c->write_reserve_buf = NULL;
vfree(c->ileb_buf);
c->ileb_buf = NULL;
ubifs_lpt_free(c, 1);
c->remounting_rw = 0;
mutex_unlock(&c->umount_mutex);
return err;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,840 |
--- initial
+++ final
@@ -1,91 +1,91 @@
int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key, struct ubifs_znode **zn, int *n) {
int err, exact;
struct ubifs_znode *znode;
time64_t time = ktime_get_seconds();
dbg_tnck(key, "search key ");
- ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
+ ubifs_assert(c, key_type(c, key) < UBIFS_INVALID_KEY);
znode = c->zroot.znode;
if (unlikely(!znode)) {
znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
if (IS_ERR(znode)) return PTR_ERR(znode);
}
znode->time = time;
while (1) {
struct ubifs_zbranch *zbr;
exact = ubifs_search_zbranch(c, znode, key, n);
if (znode->level == 0) break;
if (*n < 0) *n = 0;
zbr = &znode->zbranch[*n];
if (zbr->znode) {
znode->time = time;
znode = zbr->znode;
continue;
}
/* znode is not in TNC cache, load it from the media */
znode = ubifs_load_znode(c, zbr, znode, *n);
if (IS_ERR(znode)) return PTR_ERR(znode);
}
*zn = znode;
if (exact || !is_hash_key(c, key) || *n != -1) {
dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
return exact;
}
/*
* Here is a tricky place. We have not found the key and this is a
* "hashed" key, which may collide. The rest of the code deals with
* situations like this:
*
* | 3 | 5 |
* / \
* | 3 | 5 | | 6 | 7 | (x)
*
* Or a more complex example:
*
* | 1 | 5 |
* / \
* | 1 | 3 | | 5 | 8 |
* \ /
* | 5 | 5 | | 6 | 7 | (x)
*
* In the examples, if we are looking for key "5", we may reach nodes
* marked with "(x)". In this case what we have to do is look at the
* left and see if there is a "5" key there. If there is, we have to
* return it.
*
* Note, this whole situation is possible because we allow the children
* of the current znode to contain elements which are equivalent to the
* next key in the parent. For example, this happens if we split a
* znode like this: | 3 | 5 | 5 | 6 | 7 |, which results in something
* like this:
* | 3 | 5 |
* / \
* | 3 | 5 | | 5 | 6 | 7 |
* ^
* And this becomes what is at the first "picture" after key "5" marked
* with "^" is removed. What could be done is we could prohibit
* splitting in the middle of the colliding sequence. Also, when
* removing the leftmost key, we would have to correct the key of the
* parent node, which would introduce additional complications. Namely,
* if we changed the leftmost key of the parent znode, the garbage
* collector would be unable to find it (GC is doing this when GC'ing
* indexing LEBs). Although we already have an additional RB-tree where
* we save such changed znodes (see 'ins_clr_old_idx_znode()') until
* after the commit. But anyway, this does not look easy to implement
* so we did not try this.
*/
err = tnc_prev(c, &znode, n);
if (err == -ENOENT) {
dbg_tnc("found 0, lvl %d, n -1", znode->level);
*n = -1;
return 0;
}
if (unlikely(err < 0)) return err;
if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
dbg_tnc("found 0, lvl %d, n -1", znode->level);
*n = -1;
return 0;
}
dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
*zn = znode;
return 1;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,866 |
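The collision handling that the long comment above describes amounts to one extra probe to the left. A condensed sketch of that fallback, using only names from this record and assuming keys_cmp() returns 0 on equality (which matches how the function uses it):

/* Hashed key not found at *n: a colliding entry may still sit at the
 * tail of the znode to the left, so step back once and re-compare. */
err = tnc_prev(c, &znode, n);
if (!err && !keys_cmp(c, key, &znode->zbranch[*n].key)) {
        *zn = znode;
        return 1;       /* colliding match found to the left */
}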
--- initial
+++ final
@@ -1,46 +1,46 @@
int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, int row, int col) {
struct ubifs_nnode *nnode, *nn;
struct ubifs_cnode *cn;
int num, iip = 0, err;
if (!dbg_is_chk_lprops(c)) return 0;
while (cnode) {
- ubifs_assert(row >= 0);
+ ubifs_assert(c, row >= 0);
nnode = cnode->parent;
if (cnode->level) {
/* cnode is a nnode */
num = calc_nnode_num(row, col);
if (cnode->num != num) {
ubifs_err(c, "nnode num %d expected %d parent num %d iip %d", cnode->num, num, (nnode ? nnode->num : 0), cnode->iip);
return -EINVAL;
}
nn = (struct ubifs_nnode *)cnode;
while (iip < UBIFS_LPT_FANOUT) {
cn = nn->nbranch[iip].cnode;
if (cn) {
/* Go down */
row += 1;
col <<= UBIFS_LPT_FANOUT_SHIFT;
col += iip;
iip = 0;
cnode = cn;
break;
}
/* Go right */
iip += 1;
}
if (iip < UBIFS_LPT_FANOUT) continue;
} else {
struct ubifs_pnode *pnode;
/* cnode is a pnode */
pnode = (struct ubifs_pnode *)cnode;
err = dbg_chk_pnode(c, pnode, col);
if (err) return err;
}
/* Go up and to the right */
row -= 1;
col >>= UBIFS_LPT_FANOUT_SHIFT;
iip = cnode->iip + 1;
cnode = (struct ubifs_cnode *)nnode;
}
return 0;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,803 |
--- initial
+++ final
@@ -1,82 +1,82 @@
static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n) {
struct ubifs_zbranch *zbr;
struct ubifs_znode *zp;
int i, err;
/* Delete without merge for now */
- ubifs_assert(znode->level == 0);
- ubifs_assert(n >= 0 && n < c->fanout);
+ ubifs_assert(c, znode->level == 0);
+ ubifs_assert(c, n >= 0 && n < c->fanout);
dbg_tnck(&znode->zbranch[n].key, "deleting key ");
zbr = &znode->zbranch[n];
lnc_free(zbr);
err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
if (err) {
ubifs_dump_znode(c, znode);
return err;
}
/* We do not "gap" zbranch slots */
for (i = n; i < znode->child_cnt - 1; i++)
znode->zbranch[i] = znode->zbranch[i + 1];
znode->child_cnt -= 1;
if (znode->child_cnt > 0) return 0;
/*
* This was the last zbranch, we have to delete this znode from the
* parent.
*/
do {
- ubifs_assert(!ubifs_zn_obsolete(znode));
- ubifs_assert(ubifs_zn_dirty(znode));
+ ubifs_assert(c, !ubifs_zn_obsolete(znode));
+ ubifs_assert(c, ubifs_zn_dirty(znode));
zp = znode->parent;
n = znode->iip;
atomic_long_dec(&c->dirty_zn_cnt);
err = insert_old_idx_znode(c, znode);
if (err) return err;
if (znode->cnext) {
__set_bit(OBSOLETE_ZNODE, &znode->flags);
atomic_long_inc(&c->clean_zn_cnt);
atomic_long_inc(&ubifs_clean_zn_cnt);
} else
kfree(znode);
znode = zp;
} while (znode->child_cnt == 1); /* while removing last child */
/* Remove from znode, entry n - 1 */
znode->child_cnt -= 1;
- ubifs_assert(znode->level != 0);
+ ubifs_assert(c, znode->level != 0);
for (i = n; i < znode->child_cnt; i++) {
znode->zbranch[i] = znode->zbranch[i + 1];
if (znode->zbranch[i].znode) znode->zbranch[i].znode->iip = i;
}
/*
* If this is the root and it has only 1 child then
* collapse the tree.
*/
if (!znode->parent) {
while (znode->child_cnt == 1 && znode->level != 0) {
zp = znode;
zbr = &znode->zbranch[0];
znode = get_znode(c, znode, 0);
if (IS_ERR(znode)) return PTR_ERR(znode);
znode = dirty_cow_znode(c, zbr);
if (IS_ERR(znode)) return PTR_ERR(znode);
znode->parent = NULL;
znode->iip = 0;
if (c->zroot.len) {
err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs);
if (err) return err;
}
c->zroot.lnum = zbr->lnum;
c->zroot.offs = zbr->offs;
c->zroot.len = zbr->len;
c->zroot.znode = znode;
- ubifs_assert(!ubifs_zn_obsolete(zp));
- ubifs_assert(ubifs_zn_dirty(zp));
+ ubifs_assert(c, !ubifs_zn_obsolete(zp));
+ ubifs_assert(c, ubifs_zn_dirty(zp));
atomic_long_dec(&c->dirty_zn_cnt);
if (zp->cnext) {
__set_bit(OBSOLETE_ZNODE, &zp->flags);
atomic_long_inc(&c->clean_zn_cnt);
atomic_long_inc(&ubifs_clean_zn_cnt);
} else
kfree(zp);
}
}
return 0;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,862 |
--- initial
+++ final
@@ -1,33 +1,33 @@
int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, int lnum, int offs) {
int err, l;
struct ubifs_ch *ch = buf;
dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
- ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
- ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
- ubifs_assert(!(offs & 7) && offs < c->leb_size);
- ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);
+ ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
+ ubifs_assert(c, len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
+ ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
+ ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT);
err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
if (err && err != -EBADMSG) return err;
if (type != ch->node_type) {
ubifs_errc(c, "bad node type (%d but expected %d)", ch->node_type, type);
goto out;
}
err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
if (err) {
ubifs_errc(c, "expected node type %d", type);
return err;
}
l = le32_to_cpu(ch->len);
if (l != len) {
ubifs_errc(c, "bad node length %d, expected %d", l, len);
goto out;
}
return 0;
out:
ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum, offs, ubi_is_mapped(c->ubi, lnum));
if (!c->probing) {
ubifs_dump_node(c, buf);
dump_stack();
}
return -EINVAL;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,761 |
--- initial
+++ final
@@ -1,10 +1,10 @@
const struct ubifs_lprops *ubifs_fast_find_empty(struct ubifs_info *c) {
struct ubifs_lprops *lprops;
- ubifs_assert(mutex_is_locked(&c->lp_mutex));
+ ubifs_assert(c, mutex_is_locked(&c->lp_mutex));
if (list_empty(&c->empty_list)) return NULL;
lprops = list_entry(c->empty_list.next, struct ubifs_lprops, list);
- ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
- ubifs_assert(!(lprops->flags & LPROPS_INDEX));
- ubifs_assert(lprops->free == c->leb_size);
+ ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert(c, !(lprops->flags & LPROPS_INDEX));
+ ubifs_assert(c, lprops->free == c->leb_size);
return lprops;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,785 |
--- initial
+++ final
@@ -1,137 +1,137 @@
static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode, struct ubifs_zbranch *zbr, int n) {
struct ubifs_znode *zn, *zi, *zp;
int i, keep, move, appending = 0;
union ubifs_key *key = &zbr->key, *key1;
- ubifs_assert(n >= 0 && n <= c->fanout);
+ ubifs_assert(c, n >= 0 && n <= c->fanout);
/* Implement naive insert for now */
again:
zp = znode->parent;
if (znode->child_cnt < c->fanout) {
- ubifs_assert(n != c->fanout);
+ ubifs_assert(c, n != c->fanout);
dbg_tnck(key, "inserted at %d level %d, key ", n, znode->level);
insert_zbranch(znode, zbr, n);
/* Ensure parent's key is correct */
if (n == 0 && zp && znode->iip == 0) correct_parent_keys(c, znode);
return 0;
}
/*
* Unfortunately, @znode does not have more empty slots and we have to
* split it.
*/
dbg_tnck(key, "splitting level %d, key ", znode->level);
if (znode->alt)
/*
* We can no longer be sure of finding this znode by key, so we
* record it in the old_idx tree.
*/
ins_clr_old_idx_znode(c, znode);
zn = kzalloc(c->max_znode_sz, GFP_NOFS);
if (!zn) return -ENOMEM;
zn->parent = zp;
zn->level = znode->level;
/* Decide where to split */
if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) {
/* Try not to split consecutive data keys */
if (n == c->fanout) {
key1 = &znode->zbranch[n - 1].key;
if (key_inum(c, key1) == key_inum(c, key) && key_type(c, key1) == UBIFS_DATA_KEY) appending = 1;
} else
goto check_split;
} else if (appending && n != c->fanout) {
/* Try not to split consecutive data keys */
appending = 0;
check_split:
if (n >= (c->fanout + 1) / 2) {
key1 = &znode->zbranch[0].key;
if (key_inum(c, key1) == key_inum(c, key) && key_type(c, key1) == UBIFS_DATA_KEY) {
key1 = &znode->zbranch[n].key;
if (key_inum(c, key1) != key_inum(c, key) || key_type(c, key1) != UBIFS_DATA_KEY) {
keep = n;
move = c->fanout - keep;
zi = znode;
goto do_split;
}
}
}
}
if (appending) {
keep = c->fanout;
move = 0;
} else {
keep = (c->fanout + 1) / 2;
move = c->fanout - keep;
}
/*
* Although we don't at present, we could look at the neighbors and see
* if we can move some zbranches there.
*/
if (n < keep) {
/* Insert into existing znode */
zi = znode;
move += 1;
keep -= 1;
} else {
/* Insert into new znode */
zi = zn;
n -= keep;
/* Re-parent */
if (zn->level != 0) zbr->znode->parent = zn;
}
do_split:
__set_bit(DIRTY_ZNODE, &zn->flags);
atomic_long_inc(&c->dirty_zn_cnt);
zn->child_cnt = move;
znode->child_cnt = keep;
dbg_tnc("moving %d, keeping %d", move, keep);
/* Move zbranch */
for (i = 0; i < move; i++) {
zn->zbranch[i] = znode->zbranch[keep + i];
/* Re-parent */
if (zn->level != 0)
if (zn->zbranch[i].znode) {
zn->zbranch[i].znode->parent = zn;
zn->zbranch[i].znode->iip = i;
}
}
/* Insert new key and branch */
dbg_tnck(key, "inserting at %d level %d, key ", n, zn->level);
insert_zbranch(zi, zbr, n);
/* Insert new znode (produced by splitting) into the parent */
if (zp) {
if (n == 0 && zi == znode && znode->iip == 0) correct_parent_keys(c, znode);
/* Locate insertion point */
n = znode->iip + 1;
/* Tail recursion */
zbr->key = zn->zbranch[0].key;
zbr->znode = zn;
zbr->lnum = 0;
zbr->offs = 0;
zbr->len = 0;
znode = zp;
goto again;
}
/* We have to split root znode */
dbg_tnc("creating new zroot at level %d", znode->level + 1);
zi = kzalloc(c->max_znode_sz, GFP_NOFS);
if (!zi) return -ENOMEM;
zi->child_cnt = 2;
zi->level = znode->level + 1;
__set_bit(DIRTY_ZNODE, &zi->flags);
atomic_long_inc(&c->dirty_zn_cnt);
zi->zbranch[0].key = znode->zbranch[0].key;
zi->zbranch[0].znode = znode;
zi->zbranch[0].lnum = c->zroot.lnum;
zi->zbranch[0].offs = c->zroot.offs;
zi->zbranch[0].len = c->zroot.len;
zi->zbranch[1].key = zn->zbranch[0].key;
zi->zbranch[1].znode = zn;
c->zroot.lnum = 0;
c->zroot.offs = 0;
c->zroot.len = 0;
c->zroot.znode = zi;
zn->parent = zi;
zn->iip = 1;
znode->parent = zi;
znode->iip = 0;
return 0;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,864 |
--- initial
+++ final
@@ -1,15 +1,15 @@
int ubifs_leb_map(struct ubifs_info *c, int lnum) {
int err;
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (c->ro_error) return -EROFS;
if (!dbg_is_tst_rcvry(c))
err = ubi_leb_map(c->ubi, lnum);
else
err = dbg_leb_map(c, lnum);
if (err) {
ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err);
ubifs_ro_mode(c, err);
dump_stack();
}
return err;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,755 |
--- initial
+++ final
@@ -1,14 +1,14 @@
static void remove_from_lpt_heap(struct ubifs_info *c, struct ubifs_lprops *lprops, int cat) {
struct ubifs_lpt_heap *heap;
int hpos = lprops->hpos;
heap = &c->lpt_heap[cat - 1];
- ubifs_assert(hpos >= 0 && hpos < heap->cnt);
- ubifs_assert(heap->arr[hpos] == lprops);
+ ubifs_assert(c, hpos >= 0 && hpos < heap->cnt);
+ ubifs_assert(c, heap->arr[hpos] == lprops);
heap->cnt -= 1;
if (hpos < heap->cnt) {
heap->arr[hpos] = heap->arr[heap->cnt];
heap->arr[hpos]->hpos = hpos;
adjust_lpt_heap(c, heap, heap->arr[hpos], hpos, cat);
}
dbg_check_heap(c, heap, cat, -1);
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,780 |
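remove_from_lpt_heap() performs the textbook array-heap deletion: move the last element into the vacated slot, shrink the heap, then restore the heap property from that slot (adjust_lpt_heap() here). A generic sketch of the same step, with sift() standing in as an assumed restore helper:

void heap_remove(int *arr, int *cnt, int pos)
{
        (*cnt)--;
        if (pos < *cnt) {
                arr[pos] = arr[*cnt];   /* last element fills the hole */
                sift(arr, *cnt, pos);   /* assumed: re-establish heap order */
        }
}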
--- initial
+++ final
@@ -1,24 +1,24 @@
int ubifs_orphan_start_commit(struct ubifs_info *c) {
struct ubifs_orphan *orphan, **last;
spin_lock(&c->orphan_lock);
last = &c->orph_cnext;
list_for_each_entry(orphan, &c->orph_new, new_list) {
- ubifs_assert(orphan->new);
- ubifs_assert(!orphan->cmt);
+ ubifs_assert(c, orphan->new);
+ ubifs_assert(c, !orphan->cmt);
orphan->new = 0;
orphan->cmt = 1;
*last = orphan;
last = &orphan->cnext;
}
*last = NULL;
c->cmt_orphans = c->new_orphans;
c->new_orphans = 0;
dbg_cmt("%d orphans to commit", c->cmt_orphans);
INIT_LIST_HEAD(&c->orph_new);
if (c->tot_orphans == 0)
c->no_orphs = 1;
else
c->no_orphs = 0;
spin_unlock(&c->orphan_lock);
return 0;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,821 |
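Both orphan records build their commit list with a pointer-to-pointer cursor (last = &c->orph_cnext; ... *last = orphan; last = &orphan->cnext;), which appends in order without tracking a tail node separately. A minimal standalone sketch of the idiom:

struct node { struct node *next; /* payload omitted */ };

static struct node *build_list(struct node **elems, int n)
{
        struct node *head = NULL, **last = &head;
        int i;

        for (i = 0; i < n; i++) {
                *last = elems[i];        /* fill the pending slot */
                last = &elems[i]->next;  /* this next field is now pending */
        }
        *last = NULL;                    /* terminate, like '*last = NULL' above */
        return head;
}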
--- initial
+++ final
@@ -1,147 +1,147 @@
static int write_index(struct ubifs_info *c) {
struct ubifs_idx_node *idx;
struct ubifs_znode *znode, *cnext;
int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
int avail, wlen, err, lnum_pos = 0, blen, nxt_offs;
cnext = c->enext;
if (!cnext) return 0;
/*
* Always write index nodes to the index head so that index nodes and
* other types of nodes are never mixed in the same erase block.
*/
lnum = c->ihead_lnum;
buf_offs = c->ihead_offs;
/* Allocate commit buffer */
buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size);
used = 0;
avail = buf_len;
/* Ensure there is enough room for first write */
next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
if (buf_offs + next_len > c->leb_size) {
err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0, LPROPS_TAKEN);
if (err) return err;
lnum = -1;
}
while (1) {
cond_resched();
znode = cnext;
idx = c->cbuf + used;
/* Make index node */
idx->ch.node_type = UBIFS_IDX_NODE;
idx->child_cnt = cpu_to_le16(znode->child_cnt);
idx->level = cpu_to_le16(znode->level);
for (i = 0; i < znode->child_cnt; i++) {
struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
struct ubifs_zbranch *zbr = &znode->zbranch[i];
key_write_idx(c, &zbr->key, &br->key);
br->lnum = cpu_to_le32(zbr->lnum);
br->offs = cpu_to_le32(zbr->offs);
br->len = cpu_to_le32(zbr->len);
if (!zbr->lnum || !zbr->len) {
ubifs_err(c, "bad ref in znode");
ubifs_dump_znode(c, znode);
if (zbr->znode) ubifs_dump_znode(c, zbr->znode);
return -EINVAL;
}
}
len = ubifs_idx_node_sz(c, znode->child_cnt);
ubifs_prepare_node(c, idx, len, 0);
/* Determine the index node position */
if (lnum == -1) {
lnum = c->ilebs[lnum_pos++];
buf_offs = 0;
used = 0;
avail = buf_len;
}
offs = buf_offs + used;
if (lnum != znode->lnum || offs != znode->offs || len != znode->len) {
ubifs_err(c, "inconsistent znode posn");
return -EINVAL;
}
/* Grab some stuff from znode while we still can */
cnext = znode->cnext;
- ubifs_assert(ubifs_zn_dirty(znode));
- ubifs_assert(ubifs_zn_cow(znode));
+ ubifs_assert(c, ubifs_zn_dirty(znode));
+ ubifs_assert(c, ubifs_zn_cow(znode));
/*
* It is important that other threads should see %DIRTY_ZNODE
* flag cleared before %COW_ZNODE. Specifically, it matters in
* the 'dirty_cow_znode()' function. This is the reason for the
* first barrier. Also, we want the bit changes to be seen by
* other threads ASAP, to avoid unnecessary copying, which is
* the reason for the second barrier.
*/
clear_bit(DIRTY_ZNODE, &znode->flags);
smp_mb__before_atomic();
clear_bit(COW_ZNODE, &znode->flags);
smp_mb__after_atomic();
/*
* We have marked the znode as clean but have not updated the
* @c->clean_zn_cnt counter. If this znode becomes dirty again
* before 'free_obsolete_znodes()' is called, then
* @c->clean_zn_cnt will be decremented before it gets
* incremented (resulting in 2 decrements for the same znode).
* This means that @c->clean_zn_cnt may become negative for a
* while.
*
* Q: why can we not increment @c->clean_zn_cnt?
* A: because we do not have the @c->tnc_mutex locked, and the
* following code would be racy and buggy:
*
* if (!ubifs_zn_obsolete(znode)) {
* atomic_long_inc(&c->clean_zn_cnt);
* atomic_long_inc(&ubifs_clean_zn_cnt);
* }
*
* Thus, we just delay the @c->clean_zn_cnt update until we
* have the mutex locked.
*/
/* Do not access znode from this point on */
/* Update buffer positions */
wlen = used + len;
used += ALIGN(len, 8);
avail -= ALIGN(len, 8);
/*
* Calculate the next index node length to see if there is
* enough room for it
*/
if (cnext == c->cnext)
next_len = 0;
else
next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
nxt_offs = buf_offs + used + next_len;
if (next_len && nxt_offs <= c->leb_size) {
if (avail > 0)
continue;
else
blen = buf_len;
} else {
wlen = ALIGN(wlen, 8);
blen = ALIGN(wlen, c->min_io_size);
ubifs_pad(c, c->cbuf + wlen, blen - wlen);
}
/* The buffer is full or there are no more znodes to do */
err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
if (err) return err;
buf_offs += blen;
if (next_len) {
if (nxt_offs > c->leb_size) {
err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0, LPROPS_TAKEN);
if (err) return err;
lnum = -1;
}
used -= blen;
if (used < 0) used = 0;
avail = buf_len - used;
memmove(c->cbuf, c->cbuf + blen, used);
continue;
}
break;
}
if (lnum != c->dbg->new_ihead_lnum || buf_offs != c->dbg->new_ihead_offs) {
ubifs_err(c, "inconsistent ihead");
return -EINVAL;
}
c->ihead_lnum = lnum;
c->ihead_offs = buf_offs;
return 0;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,847 |
--- initial
+++ final
@@ -1,79 +1,79 @@
static int do_truncation(struct ubifs_info *c, struct inode *inode, const struct iattr *attr) {
int err;
struct ubifs_budget_req req;
loff_t old_size = inode->i_size, new_size = attr->ia_size;
int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
struct ubifs_inode *ui = ubifs_inode(inode);
dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
memset(&req, 0, sizeof(struct ubifs_budget_req));
/*
* If this is truncation to a smaller size, and we do not truncate on a
* block boundary, budget for changing one data block, because the last
* block will be re-written.
*/
if (new_size & (UBIFS_BLOCK_SIZE - 1)) req.dirtied_page = 1;
req.dirtied_ino = 1;
/* A funny way to budget for truncation node */
req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
err = ubifs_budget_space(c, &req);
if (err) {
/*
* Treat truncations to zero as deletion and always allow them,
* just like we do for '->unlink()'.
*/
if (new_size || err != -ENOSPC) return err;
budgeted = 0;
}
truncate_setsize(inode, new_size);
if (offset) {
pgoff_t index = new_size >> PAGE_SHIFT;
struct page *page;
page = find_lock_page(inode->i_mapping, index);
if (page) {
if (PageDirty(page)) {
/*
* 'ubifs_jnl_truncate()' will try to truncate
* the last data node, but it contains
* out-of-date data because the page is dirty.
* Write the page now, so that
* 'ubifs_jnl_truncate()' will see an already
* truncated (and up to date) data node.
*/
- ubifs_assert(PagePrivate(page));
+ ubifs_assert(c, PagePrivate(page));
clear_page_dirty_for_io(page);
if (UBIFS_BLOCKS_PER_PAGE_SHIFT) offset = new_size & (PAGE_SIZE - 1);
err = do_writepage(page, offset);
put_page(page);
if (err) goto out_budg;
/*
* We could now tell 'ubifs_jnl_truncate()' not
* to read the last block.
*/
} else {
/*
* We could 'kmap()' the page and pass the data
* to 'ubifs_jnl_truncate()' to save it from
* having to read it.
*/
unlock_page(page);
put_page(page);
}
}
}
mutex_lock(&ui->ui_mutex);
ui->ui_size = inode->i_size;
/* Truncation changes inode [mc]time */
inode->i_mtime = inode->i_ctime = current_time(inode);
/* Other attributes may be changed at the same time as well */
do_attr_changes(inode, attr);
err = ubifs_jnl_truncate(c, inode, old_size, new_size);
mutex_unlock(&ui->ui_mutex);
out_budg:
if (budgeted)
ubifs_release_budget(c, &req);
else {
c->bi.nospace = c->bi.nospace_rp = 0;
smp_wmb();
}
return err;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,736 |
--- initial
+++ final
@@ -1,10 +1,10 @@
static int check_lpt_type(const struct ubifs_info *c, uint8_t **addr, int *pos, int type) {
int node_type;
- node_type = ubifs_unpack_bits(addr, pos, UBIFS_LPT_TYPE_BITS);
+ node_type = ubifs_unpack_bits(c, addr, pos, UBIFS_LPT_TYPE_BITS);
if (node_type != type) {
ubifs_err(c, "invalid type (%d) in LPT node type %d", node_type, type);
dump_stack();
return -EINVAL;
}
return 0;
}<sep>@@
identifier f,c;
expression e1,e2,e3;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_unpack_bits(
+ c,
e1,e2,e3)
...>
}
<|end_of_text|> | 11,792 |
--- initial
+++ final
@@ -1,15 +1,15 @@
int ubifs_leb_unmap(struct ubifs_info *c, int lnum) {
int err;
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (c->ro_error) return -EROFS;
if (!dbg_is_tst_rcvry(c))
err = ubi_leb_unmap(c->ubi, lnum);
else
err = dbg_leb_unmap(c, lnum);
if (err) {
ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err);
ubifs_ro_mode(c, err);
dump_stack();
}
return err;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,756 |
--- initial
+++ final
@@ -1,32 +1,32 @@
static int consolidate(struct ubifs_info *c) {
int tot_avail = tot_avail_orphs(c), err = 0;
spin_lock(&c->orphan_lock);
dbg_cmt("there is space for %d orphans and there are %d", tot_avail, c->tot_orphans);
if (c->tot_orphans - c->new_orphans <= tot_avail) {
struct ubifs_orphan *orphan, **last;
int cnt = 0;
/* Change the cnext list to include all non-new orphans */
last = &c->orph_cnext;
list_for_each_entry(orphan, &c->orph_list, list) {
if (orphan->new) continue;
orphan->cmt = 1;
*last = orphan;
last = &orphan->cnext;
cnt += 1;
}
*last = NULL;
- ubifs_assert(cnt == c->tot_orphans - c->new_orphans);
+ ubifs_assert(c, cnt == c->tot_orphans - c->new_orphans);
c->cmt_orphans = cnt;
c->ohead_lnum = c->orph_first;
c->ohead_offs = 0;
} else {
/*
* We limit the number of orphans so that this should
* never happen.
*/
ubifs_err(c, "out of space in orphan area");
err = -EINVAL;
}
spin_unlock(&c->orphan_lock);
return err;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,818 |
--- initial
+++ final
@@ -1,20 +1,20 @@
static void ubifs_remove_from_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, int cat) {
switch (cat) {
case LPROPS_DIRTY:
case LPROPS_DIRTY_IDX:
case LPROPS_FREE: remove_from_lpt_heap(c, lprops, cat); break;
case LPROPS_FREEABLE:
c->freeable_cnt -= 1;
- ubifs_assert(c->freeable_cnt >= 0);
+ ubifs_assert(c, c->freeable_cnt >= 0);
/* Fall through */
case LPROPS_UNCAT:
case LPROPS_EMPTY:
case LPROPS_FRDI_IDX:
- ubifs_assert(!list_empty(&lprops->list));
+ ubifs_assert(c, !list_empty(&lprops->list));
list_del(&lprops->list);
break;
- default: ubifs_assert(0);
+ default: ubifs_assert(c, 0);
}
c->in_a_category_cnt -= 1;
- ubifs_assert(c->in_a_category_cnt >= 0);
+ ubifs_assert(c, c->in_a_category_cnt >= 0);
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,789 |
--- initial
+++ final
@@ -1,139 +1,139 @@
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf, int jhead) {
int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;
struct ubifs_scan_leb *sleb;
void *buf = sbuf + offs;
dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped);
sleb = ubifs_start_scan(c, lnum, offs, sbuf);
if (IS_ERR(sleb)) return sleb;
- ubifs_assert(len >= 8);
+ ubifs_assert(c, len >= 8);
while (len >= 8) {
dbg_scan("look at LEB %d:%d (%d bytes left)", lnum, offs, len);
cond_resched();
/*
* Scan quietly until there is an error from which we cannot
* recover
*/
ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
if (ret == SCANNED_A_NODE) {
/* A valid node, and not a padding node */
struct ubifs_ch *ch = buf;
int node_len;
err = ubifs_add_snod(c, sleb, buf, offs);
if (err) goto error;
node_len = ALIGN(le32_to_cpu(ch->len), 8);
offs += node_len;
buf += node_len;
len -= node_len;
} else if (ret > 0) {
/* Padding bytes or a valid padding node */
offs += ret;
buf += ret;
len -= ret;
} else if (ret == SCANNED_EMPTY_SPACE || ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE || ret == SCANNED_A_CORRUPT_NODE) {
dbg_rcvry("found corruption (%d) at %d:%d", ret, lnum, offs);
break;
} else {
ubifs_err(c, "unexpected return value %d", ret);
err = -EINVAL;
goto error;
}
}
if (ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE) {
if (!is_last_write(c, buf, offs)) goto corrupted_rescan;
} else if (ret == SCANNED_A_CORRUPT_NODE) {
if (!no_more_nodes(c, buf, len, lnum, offs)) goto corrupted_rescan;
} else if (!is_empty(buf, len)) {
if (!is_last_write(c, buf, offs)) {
int corruption = first_non_ff(buf, len);
/*
* See header comment for this file for more
* explanations about the reasons we have this check.
*/
ubifs_err(c, "corrupt empty space LEB %d:%d, corruption starts at %d", lnum, offs, corruption);
/* Make sure we dump interesting non-0xFF data */
offs += corruption;
buf += corruption;
goto corrupted;
}
}
min_io_unit = round_down(offs, c->min_io_size);
if (grouped)
/*
* If nodes are grouped, always drop the incomplete group at
* the end.
*/
drop_last_group(sleb, &offs);
if (jhead == GCHD) {
/*
* If this LEB belongs to the GC head then while we are in the
* middle of the same min. I/O unit keep dropping nodes. So
* basically, what we want is to make sure that the last min.
* I/O unit where we saw the corruption is dropped completely
* with all the uncorrupted nodes which may possibly sit there.
*
* In other words, let's name the min. I/O unit where the
* corruption starts B, and the previous min. I/O unit A. The
* below code tries to deal with a situation when half of B
* contains valid nodes or the end of a valid node, and the
* second half of B contains corrupted data or garbage. This
* means that UBIFS had been writing to B just before the power
* cut happened. I do not know how realistic it is that half of the
* min. I/O unit had been written successfully and the other half
* not, but this is possible in our 'failure
* mode emulation' infrastructure at least.
*
* So what is the problem, why do we need to drop those nodes? Why
* can't we just clean-up the second half of B by putting a
* padding node there? We can, and this works fine with one
* exception which was reproduced with power cut emulation
* testing and happens extremely rarely.
*
* Imagine the file-system is full, we run GC which starts
* moving valid nodes from LEB X to LEB Y (obviously, LEB Y is
* the current GC head LEB). The @c->gc_lnum is -1, which means
* that GC will retain LEB X and will try to continue. Imagine
* that LEB X is currently the dirtiest LEB, and the amount of
* used space in LEB Y is exactly the same as amount of free
* space in LEB X.
*
* And a power cut happens when nodes are moved from LEB X to
* LEB Y. We are here trying to recover LEB Y which is the GC
* head LEB. We find the min. I/O unit B as described above.
* Then we clean-up LEB Y by padding min. I/O unit. And later
* 'ubifs_rcvry_gc_commit()' function fails, because it cannot
* find a dirty LEB which could be GC'd into LEB Y! Even LEB X
* does not match because the amount of valid nodes there does
* not fit the free space in LEB Y any more! And this is
* because of the padding node which we added to LEB Y. The
* user-visible effect of this which I once observed and
* analysed is that we cannot mount the file-system with
* -ENOSPC error.
*
* So obviously, to make sure that situation does not happen we
* should free min. I/O unit B in LEB Y completely and the last
* used min. I/O unit in LEB Y should be A. This is basically
* what the below code tries to do.
*/
while (offs > min_io_unit)
drop_last_node(sleb, &offs);
}
buf = sbuf + offs;
len = c->leb_size - offs;
clean_buf(c, &buf, lnum, &offs, &len);
ubifs_end_scan(c, sleb, lnum, offs);
err = fix_unclean_leb(c, sleb, start);
if (err) goto error;
return sleb;
corrupted_rescan:
/* Re-scan the corrupted data with verbose messages */
ubifs_err(c, "corruption %d", ret);
ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
corrupted:
ubifs_scanned_corruption(c, lnum, offs, buf);
err = -EUCLEAN;
error:
ubifs_err(c, "LEB %d scanning failed", lnum);
ubifs_scan_destroy(sleb);
return ERR_PTR(err);
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,826 |
--- initial
+++ final
@@ -1,31 +1,31 @@
static void ubifs_remount_ro(struct ubifs_info *c) {
int i, err;
- ubifs_assert(!c->need_recovery);
- ubifs_assert(!c->ro_mount);
+ ubifs_assert(c, !c->need_recovery);
+ ubifs_assert(c, !c->ro_mount);
mutex_lock(&c->umount_mutex);
if (c->bgt) {
kthread_stop(c->bgt);
c->bgt = NULL;
}
dbg_save_space_info(c);
for (i = 0; i < c->jhead_cnt; i++) {
err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
if (err) ubifs_ro_mode(c, err);
}
c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum);
err = ubifs_write_master(c);
if (err) ubifs_ro_mode(c, err);
vfree(c->orph_buf);
c->orph_buf = NULL;
kfree(c->write_reserve_buf);
c->write_reserve_buf = NULL;
vfree(c->ileb_buf);
c->ileb_buf = NULL;
ubifs_lpt_free(c, 1);
c->ro_mount = 1;
err = dbg_check_space_info(c);
if (err) ubifs_ro_mode(c, err);
mutex_unlock(&c->umount_mutex);
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,839 |
--- initial
+++ final
@@ -1,67 +1,67 @@
static int power_cut_emulated(struct ubifs_info *c, int lnum, int write) {
struct ubifs_debug_info *d = c->dbg;
- ubifs_assert(dbg_is_tst_rcvry(c));
+ ubifs_assert(c, dbg_is_tst_rcvry(c));
if (!d->pc_cnt) {
/* First call - decide delay to the power cut */
if (chance(1, 2)) {
unsigned long delay;
if (chance(1, 2)) {
d->pc_delay = 1;
/* Fail within 1 minute */
delay = prandom_u32() % 60000;
d->pc_timeout = jiffies;
d->pc_timeout += msecs_to_jiffies(delay);
ubifs_warn(c, "failing after %lums", delay);
} else {
d->pc_delay = 2;
delay = prandom_u32() % 10000;
/* Fail within 10000 operations */
d->pc_cnt_max = delay;
ubifs_warn(c, "failing after %lu calls", delay);
}
}
d->pc_cnt += 1;
}
/* Determine if failure delay has expired */
if (d->pc_delay == 1 && time_before(jiffies, d->pc_timeout)) return 0;
if (d->pc_delay == 2 && d->pc_cnt++ < d->pc_cnt_max) return 0;
if (lnum == UBIFS_SB_LNUM) {
if (write && chance(1, 2)) return 0;
if (chance(19, 20)) return 0;
ubifs_warn(c, "failing in super block LEB %d", lnum);
} else if (lnum == UBIFS_MST_LNUM || lnum == UBIFS_MST_LNUM + 1) {
if (chance(19, 20)) return 0;
ubifs_warn(c, "failing in master LEB %d", lnum);
} else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) {
if (write && chance(99, 100)) return 0;
if (chance(399, 400)) return 0;
ubifs_warn(c, "failing in log LEB %d", lnum);
} else if (lnum >= c->lpt_first && lnum <= c->lpt_last) {
if (write && chance(7, 8)) return 0;
if (chance(19, 20)) return 0;
ubifs_warn(c, "failing in LPT LEB %d", lnum);
} else if (lnum >= c->orph_first && lnum <= c->orph_last) {
if (write && chance(1, 2)) return 0;
if (chance(9, 10)) return 0;
ubifs_warn(c, "failing in orphan LEB %d", lnum);
} else if (lnum == c->ihead_lnum) {
if (chance(99, 100)) return 0;
ubifs_warn(c, "failing in index head LEB %d", lnum);
} else if (c->jheads && lnum == c->jheads[GCHD].wbuf.lnum) {
if (chance(9, 10)) return 0;
ubifs_warn(c, "failing in GC head LEB %d", lnum);
} else if (write && !RB_EMPTY_ROOT(&c->buds) && !ubifs_search_bud(c, lnum)) {
if (chance(19, 20)) return 0;
ubifs_warn(c, "failing in non-bud LEB %d", lnum);
} else if (c->cmt_state == COMMIT_RUNNING_BACKGROUND || c->cmt_state == COMMIT_RUNNING_REQUIRED) {
if (chance(999, 1000)) return 0;
ubifs_warn(c, "failing in bud LEB %d commit running", lnum);
} else {
if (chance(9999, 10000)) return 0;
ubifs_warn(c, "failing in bud LEB %d commit not running", lnum);
}
d->pc_happened = 1;
ubifs_warn(c, "========== Power cut emulated ==========");
dump_stack();
return 1;
}<sep>@@
identifier f,c;
expression e;
@@
f(...,struct ubifs_info *c,...) {
<...
ubifs_assert(
+ c,
e)
...>
}
<|end_of_text|> | 11,734 |
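All of the injection odds above go through chance(n, d), whose definition is not part of this record. A plausible reading, consistent with the prandom_u32() call a few lines up, is that chance(n, d) is true with probability n/d, so `if (chance(19, 20)) return 0;` survives 19 calls out of 20 and emulates a power cut on the remaining one:

/* Assumed semantics of the chance() helper (sketch, not the real code): */
static inline int chance(unsigned int n, unsigned int d)
{
        return prandom_u32() % d < n;   /* true with probability n/d */
}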
--- initial
+++ final
@@ -1,14 +1,13 @@
static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) {
hfa384x_t *hw = (hfa384x_t *)wlandev->priv;
int nbss;
nbss = (inf->framelen - 3) / 32;
pr_debug("Received %d hostscan results\n", nbss);
if (nbss > 32) nbss = 32;
kfree(hw->scanresults);
- hw->scanresults = kmalloc(sizeof(hfa384x_InfFrame_t), GFP_ATOMIC);
- memcpy(hw->scanresults, inf, sizeof(hfa384x_InfFrame_t));
+ hw->scanresults = kmemdup(inf, sizeof(hfa384x_InfFrame_t), GFP_ATOMIC);
if (nbss == 0) nbss = -1;
/* Notify/wake the sleeping caller. */
hw->scanflag = nbss;
wake_up_interruptible(&hw->cmdq);
}<sep>@@
expression from,to,size,flag;
statement S;
@@
- to = \(kmalloc\|kzalloc\)(size,flag);
- memcpy(to, from, size);
+ to = kmemdup(from,size,flag);
<|end_of_text|> | 445 |
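kmemdup(src, len, gfp) allocates len bytes and copies src into them in one call, which is exactly the kmalloc()+memcpy() pair it replaces here. Note that neither the old nor the new driver code checks the result; a caller normally should, since GFP_ATOMIC allocations can fail. A hedged sketch of the safer shape:

hw->scanresults = kmemdup(inf, sizeof(hfa384x_InfFrame_t), GFP_ATOMIC);
if (!hw->scanresults)
        return;         /* assumed error path; not in the original driver */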
--- initial
+++ final
@@ -1,18 +1,18 @@
static int vbox_connector_init(struct drm_device *dev, struct vbox_crtc *vbox_crtc, struct drm_encoder *encoder) {
struct vbox_connector *vbox_connector;
struct drm_connector *connector;
vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL);
if (!vbox_connector) return -ENOMEM;
connector = &vbox_connector->base;
vbox_connector->vbox_crtc = vbox_crtc;
drm_connector_init(dev, connector, &vbox_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_connector_helper_add(connector, &vbox_connector_helper_funcs);
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
drm_mode_create_suggested_offset_properties(dev);
drm_object_attach_property(&connector->base, dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.suggested_y_property, 0);
drm_connector_register(connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,340 |
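The remaining records apply a pure rename: drm_mode_connector_attach_encoder() became drm_connector_attach_encoder() with an unchanged two-argument signature, as every diff here shows. The call shape, condensed from the vbox record above (the error check is an assumed addition):

ret = drm_connector_attach_encoder(connector, encoder); /* was drm_mode_connector_attach_encoder() */
if (ret)
        drm_connector_cleanup(connector);               /* assumed unwind path */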
--- initial
+++ final
@@ -1,22 +1,22 @@
static int adv7511_bridge_attach(struct drm_bridge *bridge) {
struct adv7511 *adv = bridge_to_adv7511(bridge);
int ret;
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
}
if (adv->i2c_main->irq)
adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
else
adv->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
ret = drm_connector_init(bridge->dev, &adv->connector, &adv7511_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
if (ret) {
DRM_ERROR("Failed to initialize connector with drm\n");
return ret;
}
drm_connector_helper_add(&adv->connector, &adv7511_connector_helper_funcs);
- drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder);
+ drm_connector_attach_encoder(&adv->connector, bridge->encoder);
if (adv->type == ADV7533) ret = adv7533_attach_dsi(adv);
if (adv->i2c_main->irq) regmap_write(adv->regmap, ADV7511_REG_INT_ENABLE(0), ADV7511_INT0_HPD);
return ret;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,265 |
--- initial
+++ final
@@ -1,20 +1,20 @@
static struct drm_connector *panel_connector_create(struct drm_device *dev, struct panel_module *mod, struct drm_encoder *encoder) {
struct panel_connector *panel_connector;
struct drm_connector *connector;
int ret;
panel_connector = devm_kzalloc(dev->dev, sizeof(*panel_connector), GFP_KERNEL);
if (!panel_connector) return NULL;
panel_connector->encoder = encoder;
panel_connector->mod = mod;
connector = &panel_connector->base;
drm_connector_init(dev, connector, &panel_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
drm_connector_helper_add(connector, &panel_connector_helper_funcs);
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret) goto fail;
return connector;
fail:
panel_connector_destroy(connector);
return NULL;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,335 |
--- initial
+++ final
@@ -1,11 +1,11 @@
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge) {
struct dw_hdmi *hdmi = bridge->driver_private;
struct drm_encoder *encoder = bridge->encoder;
struct drm_connector *connector = &hdmi->connector;
connector->interlace_allowed = 1;
connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_connector_helper_add(connector, &dw_hdmi_connector_helper_funcs);
drm_connector_init(bridge->dev, connector, &dw_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,287 |
--- initial
+++ final
@@ -1,31 +1,31 @@
int nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry) {
const struct drm_encoder_helper_funcs *helper;
struct nouveau_encoder *nv_encoder = NULL;
struct drm_encoder *encoder;
int type;
switch (entry->type) {
case DCB_OUTPUT_TMDS:
type = DRM_MODE_ENCODER_TMDS;
helper = &nv04_tmds_helper_funcs;
break;
case DCB_OUTPUT_LVDS:
type = DRM_MODE_ENCODER_LVDS;
helper = &nv04_lvds_helper_funcs;
break;
default: return -EINVAL;
}
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder) return -ENOMEM;
nv_encoder->enc_save = nv04_dfp_save;
nv_encoder->enc_restore = nv04_dfp_restore;
encoder = to_drm_encoder(nv_encoder);
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type, NULL);
drm_encoder_helper_add(encoder, helper);
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
if (entry->type == DCB_OUTPUT_TMDS && entry->location != DCB_LOC_ON_CHIP) nv04_tmds_slave_init(encoder);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,277 |
--- initial
+++ final
@@ -1,22 +1,22 @@
int shmob_drm_connector_create(struct shmob_drm_device *sdev, struct drm_encoder *encoder) {
struct drm_connector *connector = &sdev->connector.connector;
int ret;
sdev->connector.encoder = encoder;
connector->display_info.width_mm = sdev->pdata->panel.width_mm;
connector->display_info.height_mm = sdev->pdata->panel.height_mm;
ret = drm_connector_init(sdev->ddev, connector, &connector_funcs, DRM_MODE_CONNECTOR_LVDS);
if (ret < 0) return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
ret = shmob_drm_backlight_init(&sdev->connector);
if (ret < 0) goto err_cleanup;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) goto err_backlight;
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
drm_object_property_set_value(&connector->base, sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
return 0;
err_backlight:
shmob_drm_backlight_exit(&sdev->connector);
err_cleanup:
drm_connector_cleanup(connector);
return ret;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,321 |
--- initial
+++ final
@@ -1,23 +1,23 @@
int nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry) {
const struct drm_encoder_helper_funcs *helper;
struct nouveau_encoder *nv_encoder = NULL;
struct drm_device *dev = connector->dev;
struct drm_encoder *encoder;
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder) return -ENOMEM;
encoder = to_drm_encoder(nv_encoder);
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
nv_encoder->enc_save = nv04_dac_save;
nv_encoder->enc_restore = nv04_dac_restore;
if (nv_gf4_disp_arch(dev))
helper = &nv17_dac_helper_funcs;
else
helper = &nv04_dac_helper_funcs;
drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(encoder, helper);
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,275 |
--- initial
+++ final
@@ -1,34 +1,34 @@
int drm_writeback_connector_init(struct drm_device *dev, struct drm_writeback_connector *wb_connector, const struct drm_connector_funcs *con_funcs, const struct drm_encoder_helper_funcs *enc_helper_funcs, const u32 *formats, int n_formats) {
struct drm_property_blob *blob;
struct drm_connector *connector = &wb_connector->base;
struct drm_mode_config *config = &dev->mode_config;
int ret = create_writeback_properties(dev);
if (ret != 0) return ret;
blob = drm_property_create_blob(dev, n_formats * sizeof(*formats), formats);
if (IS_ERR(blob)) return PTR_ERR(blob);
drm_encoder_helper_add(&wb_connector->encoder, enc_helper_funcs);
ret = drm_encoder_init(dev, &wb_connector->encoder, &drm_writeback_encoder_funcs, DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret) goto fail;
connector->interlace_allowed = 0;
ret = drm_connector_init(dev, connector, con_funcs, DRM_MODE_CONNECTOR_WRITEBACK);
if (ret) goto connector_fail;
- ret = drm_mode_connector_attach_encoder(connector, &wb_connector->encoder);
+ ret = drm_connector_attach_encoder(connector, &wb_connector->encoder);
if (ret) goto attach_fail;
INIT_LIST_HEAD(&wb_connector->job_queue);
spin_lock_init(&wb_connector->job_lock);
wb_connector->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&wb_connector->fence_lock);
snprintf(wb_connector->timeline_name, sizeof(wb_connector->timeline_name), "CONNECTOR:%d-%s", connector->base.id, connector->name);
drm_object_attach_property(&connector->base, config->writeback_out_fence_ptr_property, 0);
drm_object_attach_property(&connector->base, config->writeback_fb_id_property, 0);
drm_object_attach_property(&connector->base, config->writeback_pixel_formats_property, blob->base.id);
wb_connector->pixel_formats_blob_ptr = blob;
return 0;
attach_fail:
drm_connector_cleanup(connector);
connector_fail:
drm_encoder_cleanup(&wb_connector->encoder);
fail:
drm_property_blob_put(blob);
return ret;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,283 |
--- initial
+++ final
@@ -1,42 +1,42 @@
static int analogix_dp_bridge_attach(struct drm_bridge *bridge) {
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_encoder *encoder = dp->encoder;
struct drm_connector *connector = NULL;
int ret = 0;
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
}
if (!dp->plat_data->skip_connector) {
connector = &dp->connector;
connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(dp->drm_dev, connector, &analogix_dp_connector_funcs, DRM_MODE_CONNECTOR_eDP);
if (ret) {
DRM_ERROR("Failed to initialize connector with drm\n");
return ret;
}
drm_connector_helper_add(connector, &analogix_dp_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
}
/*
* NOTE: the connector registration is implemented in the analogix
* platform driver; that is to say, the connector will exist after
* plat_data->attach returns, which is why we record the connector
* pointer after the platform attach.
*/
if (dp->plat_data->attach) {
ret = dp->plat_data->attach(dp->plat_data, bridge, connector);
if (ret) {
DRM_ERROR("Failed at platform attch func\n");
return ret;
}
}
if (dp->plat_data->panel) {
ret = drm_panel_attach(dp->plat_data->panel, &dp->connector);
if (ret) {
DRM_ERROR("Failed to attach panel\n");
return ret;
}
}
return 0;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,269 |
--- initial
+++ final
@@ -1,14 +1,14 @@
static int tc_bridge_attach(struct drm_bridge *bridge) {
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
struct tc_data *tc = bridge_to_tc(bridge);
struct drm_device *drm = bridge->dev;
int ret;
/* Create eDP connector */
drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs, DRM_MODE_CONNECTOR_eDP);
if (ret) return ret;
if (tc->panel) drm_panel_attach(tc->panel, &tc->connector);
drm_display_info_set_bus_formats(&tc->connector.display_info, &bus_format, 1);
- drm_mode_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
+ drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
return 0;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,332 |
--- initial
+++ final
@@ -1,52 +1,52 @@
static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) {
struct sti_hdmi *hdmi = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct drm_encoder *encoder;
struct sti_hdmi_connector *connector;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
hdmi->drm_dev = drm_dev;
encoder = sti_hdmi_find_encoder(drm_dev);
if (!encoder) return -EINVAL;
connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
if (!connector) return -EINVAL;
connector->hdmi = hdmi;
bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge) return -EINVAL;
bridge->driver_private = hdmi;
bridge->funcs = &sti_hdmi_bridge_funcs;
drm_bridge_attach(encoder, bridge, NULL);
connector->encoder = encoder;
drm_connector = (struct drm_connector *)connector;
drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_connector_init(drm_dev, drm_connector, &sti_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(drm_connector, &sti_hdmi_connector_helper_funcs);
/* initialise property */
sti_hdmi_connector_init_property(drm_dev, drm_connector);
hdmi->drm_connector = drm_connector;
- err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ err = drm_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
}
err = sti_hdmi_register_audio_driver(dev, hdmi);
if (err) {
DRM_ERROR("Failed to attach an audio codec\n");
goto err_sysfs;
}
/* Initialize audio infoframe */
err = hdmi_audio_infoframe_init(&hdmi->audio.cea);
if (err) {
DRM_ERROR("Failed to init audio infoframe\n");
goto err_sysfs;
}
/* Enable default interrupts */
hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
return 0;
err_sysfs:
drm_bridge_remove(bridge);
hdmi->drm_connector = NULL;
return -EINVAL;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,326 |
--- initial
+++ final
@@ -1,49 +1,49 @@
int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) {
struct drm_encoder *encoder;
struct drm_bridge *bridge;
struct sun4i_rgb *rgb;
int ret;
rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
if (!rgb) return -ENOMEM;
rgb->tcon = tcon;
encoder = &rgb->encoder;
ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0, &tcon->panel, &bridge);
if (ret) {
dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n");
return 0;
}
drm_encoder_helper_add(&rgb->encoder, &sun4i_rgb_enc_helper_funcs);
ret = drm_encoder_init(drm, &rgb->encoder, &sun4i_rgb_enc_funcs, DRM_MODE_ENCODER_NONE, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the rgb encoder\n");
goto err_out;
}
/* The RGB encoder can only work with the TCON channel 0 */
rgb->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
if (tcon->panel) {
drm_connector_helper_add(&rgb->connector, &sun4i_rgb_con_helper_funcs);
ret = drm_connector_init(drm, &rgb->connector, &sun4i_rgb_con_funcs, DRM_MODE_CONNECTOR_Unknown);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the rgb connector\n");
goto err_cleanup_connector;
}
- drm_mode_connector_attach_encoder(&rgb->connector, &rgb->encoder);
+ drm_connector_attach_encoder(&rgb->connector, &rgb->encoder);
ret = drm_panel_attach(tcon->panel, &rgb->connector);
if (ret) {
dev_err(drm->dev, "Couldn't attach our panel\n");
goto err_cleanup_connector;
}
}
if (bridge) {
ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
goto err_cleanup_connector;
}
}
return 0;
err_cleanup_connector:
drm_encoder_cleanup(&rgb->encoder);
err_out:
return ret;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,329 |
--- initial
+++ final
@@ -1,15 +1,15 @@
static int rcar_lvds_attach(struct drm_bridge *bridge) {
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
struct drm_connector *connector = &lvds->connector;
struct drm_encoder *encoder = bridge->encoder;
int ret;
/* If we have a next bridge just attach it. */
if (lvds->next_bridge) return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge);
/* Otherwise we have a panel, create a connector. */
ret = drm_connector_init(bridge->dev, connector, &rcar_lvds_conn_funcs, DRM_MODE_CONNECTOR_LVDS);
if (ret < 0) return ret;
drm_connector_helper_add(connector, &rcar_lvds_conn_helper_funcs);
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) return ret;
return drm_panel_attach(lvds->panel, connector);
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,318 |
--- initial
+++ final
@@ -1,28 +1,28 @@
int nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry) {
struct drm_device *dev = connector->dev;
struct drm_encoder *encoder;
struct nv17_tv_encoder *tv_enc = NULL;
tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL);
if (!tv_enc) return -ENOMEM;
tv_enc->overscan = 50;
tv_enc->flicker = 50;
tv_enc->saturation = 50;
tv_enc->hue = 0;
tv_enc->tv_norm = TV_NORM_PAL;
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
tv_enc->pin_mask = 0;
encoder = to_drm_encoder(&tv_enc->base);
tv_enc->base.dcb = entry;
tv_enc->base.or = ffs(entry->or) - 1;
drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC, NULL);
drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
tv_enc->base.enc_save = nv17_tv_save;
tv_enc->base.enc_restore = nv17_tv_restore;
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
nv17_tv_create_resources(encoder, connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,338 |
--- initial
+++ final
@@ -1,133 +1,133 @@
static int sun4i_hdmi_bind(struct device *dev, struct device *master, void *data) {
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_hdmi *hdmi;
struct resource *res;
u32 reg;
int ret;
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi) return -ENOMEM;
dev_set_drvdata(dev, hdmi);
hdmi->dev = dev;
hdmi->drv = drv;
hdmi->variant = of_device_get_match_data(dev);
if (!hdmi->variant) return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdmi->base = devm_ioremap_resource(dev, res);
if (IS_ERR(hdmi->base)) {
dev_err(dev, "Couldn't map the HDMI encoder registers\n");
return PTR_ERR(hdmi->base);
}
if (hdmi->variant->has_reset_control) {
hdmi->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(hdmi->reset)) {
dev_err(dev, "Couldn't get the HDMI reset control\n");
return PTR_ERR(hdmi->reset);
}
ret = reset_control_deassert(hdmi->reset);
if (ret) {
dev_err(dev, "Couldn't deassert HDMI reset\n");
return ret;
}
}
hdmi->bus_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(hdmi->bus_clk)) {
dev_err(dev, "Couldn't get the HDMI bus clock\n");
ret = PTR_ERR(hdmi->bus_clk);
goto err_assert_reset;
}
clk_prepare_enable(hdmi->bus_clk);
hdmi->mod_clk = devm_clk_get(dev, "mod");
if (IS_ERR(hdmi->mod_clk)) {
dev_err(dev, "Couldn't get the HDMI mod clock\n");
ret = PTR_ERR(hdmi->mod_clk);
goto err_disable_bus_clk;
}
clk_prepare_enable(hdmi->mod_clk);
hdmi->pll0_clk = devm_clk_get(dev, "pll-0");
if (IS_ERR(hdmi->pll0_clk)) {
dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n");
ret = PTR_ERR(hdmi->pll0_clk);
goto err_disable_mod_clk;
}
hdmi->pll1_clk = devm_clk_get(dev, "pll-1");
if (IS_ERR(hdmi->pll1_clk)) {
dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n");
ret = PTR_ERR(hdmi->pll1_clk);
goto err_disable_mod_clk;
}
hdmi->regmap = devm_regmap_init_mmio(dev, hdmi->base, &sun4i_hdmi_regmap_config);
if (IS_ERR(hdmi->regmap)) {
dev_err(dev, "Couldn't create HDMI encoder regmap\n");
ret = PTR_ERR(hdmi->regmap);
goto err_disable_mod_clk;
}
ret = sun4i_tmds_create(hdmi);
if (ret) {
dev_err(dev, "Couldn't create the TMDS clock\n");
goto err_disable_mod_clk;
}
if (hdmi->variant->has_ddc_parent_clk) {
hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc");
if (IS_ERR(hdmi->ddc_parent_clk)) {
dev_err(dev, "Couldn't get the HDMI DDC clock\n");
ret = PTR_ERR(hdmi->ddc_parent_clk);
goto err_disable_mod_clk;
}
} else {
hdmi->ddc_parent_clk = hdmi->tmds_clk;
}
writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG);
writel(hdmi->variant->pad_ctrl0_init_val, hdmi->base + SUN4I_HDMI_PAD_CTRL0_REG);
reg = readl(hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
reg &= SUN4I_HDMI_PLL_CTRL_DIV_MASK;
reg |= hdmi->variant->pll_ctrl_init_val;
writel(reg, hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
ret = sun4i_hdmi_i2c_create(dev, hdmi);
if (ret) {
dev_err(dev, "Couldn't create the HDMI I2C adapter\n");
goto err_disable_mod_clk;
}
drm_encoder_helper_add(&hdmi->encoder, &sun4i_hdmi_helper_funcs);
ret = drm_encoder_init(drm, &hdmi->encoder, &sun4i_hdmi_funcs, DRM_MODE_ENCODER_TMDS, NULL);
if (ret) {
dev_err(dev, "Couldn't initialise the HDMI encoder\n");
goto err_del_i2c_adapter;
}
hdmi->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
if (!hdmi->encoder.possible_crtcs) {
ret = -EPROBE_DEFER;
goto err_del_i2c_adapter;
}
#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
hdmi->cec_adap = cec_pin_allocate_adapter(&sun4i_hdmi_cec_pin_ops, hdmi, "sun4i", CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH | CEC_CAP_RC);
ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
if (ret < 0) goto err_cleanup_connector;
writel(readl(hdmi->base + SUN4I_HDMI_CEC) & ~SUN4I_HDMI_CEC_TX, hdmi->base + SUN4I_HDMI_CEC);
#endif
drm_connector_helper_add(&hdmi->connector, &sun4i_hdmi_connector_helper_funcs);
ret = drm_connector_init(drm, &hdmi->connector, &sun4i_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
if (ret) {
dev_err(dev, "Couldn't initialise the HDMI connector\n");
goto err_cleanup_connector;
}
/* There is no HPD interrupt, so we need to poll the controller */
hdmi->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
ret = cec_register_adapter(hdmi->cec_adap, dev);
if (ret < 0) goto err_cleanup_connector;
- drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
+ drm_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
return 0;
err_cleanup_connector:
cec_delete_adapter(hdmi->cec_adap);
drm_encoder_cleanup(&hdmi->encoder);
err_del_i2c_adapter:
i2c_del_adapter(hdmi->i2c);
err_disable_mod_clk:
clk_disable_unprepare(hdmi->mod_clk);
err_disable_bus_clk:
clk_disable_unprepare(hdmi->bus_clk);
err_assert_reset:
reset_control_assert(hdmi->reset);
return ret;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,327 |
--- initial
+++ final
@@ -1,27 +1,27 @@
static int sun6i_dsi_bind(struct device *dev, struct device *master, void *data) {
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
int ret;
if (!dsi->panel) return -EPROBE_DEFER;
dsi->drv = drv;
drm_encoder_helper_add(&dsi->encoder, &sun6i_dsi_enc_helper_funcs);
ret = drm_encoder_init(drm, &dsi->encoder, &sun6i_dsi_enc_funcs, DRM_MODE_ENCODER_DSI, NULL);
if (ret) {
dev_err(dsi->dev, "Couldn't initialise the DSI encoder\n");
return ret;
}
dsi->encoder.possible_crtcs = BIT(0);
drm_connector_helper_add(&dsi->connector, &sun6i_dsi_connector_helper_funcs);
ret = drm_connector_init(drm, &dsi->connector, &sun6i_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI);
if (ret) {
dev_err(dsi->dev, "Couldn't initialise the DSI connector\n");
goto err_cleanup_connector;
}
- drm_mode_connector_attach_encoder(&dsi->connector, &dsi->encoder);
+ drm_connector_attach_encoder(&dsi->connector, &dsi->encoder);
drm_panel_attach(dsi->panel, &dsi->connector);
return 0;
err_cleanup_connector:
drm_encoder_cleanup(&dsi->encoder);
return ret;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,331 |
--- initial
+++ final
@@ -1,25 +1,25 @@
int hibmc_vdac_init(struct hibmc_drm_private *priv) {
struct drm_device *dev = priv->dev;
struct drm_encoder *encoder;
struct drm_connector *connector;
int ret;
connector = hibmc_connector_init(priv);
if (IS_ERR(connector)) {
DRM_ERROR("failed to create connector: %ld\n", PTR_ERR(connector));
return PTR_ERR(connector);
}
encoder = devm_kzalloc(dev->dev, sizeof(*encoder), GFP_KERNEL);
if (!encoder) {
DRM_ERROR("failed to alloc memory when init encoder\n");
return -ENOMEM;
}
encoder->possible_crtcs = 0x1;
ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs, DRM_MODE_ENCODER_DAC, NULL);
if (ret) {
DRM_ERROR("failed to init encoder: %d\n", ret);
return ret;
}
drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,298 |
--- initial
+++ final
@@ -1,28 +1,28 @@
static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) {
struct drm_device *dev = vgdev->ddev;
struct virtio_gpu_output *output = vgdev->outputs + index;
struct drm_connector *connector = &output->conn;
struct drm_encoder *encoder = &output->enc;
struct drm_crtc *crtc = &output->crtc;
struct drm_plane *primary, *cursor;
output->index = index;
if (index == 0) {
output->info.enabled = cpu_to_le32(true);
output->info.r.width = cpu_to_le32(XRES_DEF);
output->info.r.height = cpu_to_le32(YRES_DEF);
}
primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index);
if (IS_ERR(primary)) return PTR_ERR(primary);
cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
if (IS_ERR(cursor)) return PTR_ERR(cursor);
drm_crtc_init_with_planes(dev, crtc, primary, cursor, &virtio_gpu_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs, DRM_MODE_ENCODER_VIRTUAL, NULL);
drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
encoder->possible_crtcs = 1 << index;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
drm_connector_register(connector);
return 0;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,343 |
--- initial
+++ final
@@ -1,24 +1,24 @@
int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) {
struct tegra_output *output = dc->rgb;
int err;
if (!dc->rgb) return -ENODEV;
drm_connector_init(drm, &output->connector, &tegra_rgb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
drm_connector_helper_add(&output->connector, &tegra_rgb_connector_helper_funcs);
output->connector.dpms = DRM_MODE_DPMS_OFF;
drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs, DRM_MODE_ENCODER_LVDS, NULL);
drm_encoder_helper_add(&output->encoder, &tegra_rgb_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
+ drm_connector_attach_encoder(&output->connector, &output->encoder);
drm_connector_register(&output->connector);
err = tegra_output_init(drm, output);
if (err < 0) {
dev_err(output->dev, "failed to initialize output: %d\n", err);
return err;
}
/*
* Other outputs can be attached to either display controller. The RGB
* outputs are an exception and work only with their parent display
* controller.
*/
output->encoder.possible_crtcs = drm_crtc_mask(&dc->base);
return 0;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,319 |
--- initial
+++ final
@@ -1,36 +1,36 @@
int nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry) {
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index);
int type, ret;
/* Ensure that we can talk to this encoder */
type = nv04_tv_identify(dev, entry->i2c_index);
if (type < 0) return type;
/* Allocate the necessary memory */
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder) return -ENOMEM;
/* Initialize the common members */
encoder = to_drm_encoder(nv_encoder);
drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC, NULL);
drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs);
nv_encoder->enc_save = drm_i2c_encoder_save;
nv_encoder->enc_restore = drm_i2c_encoder_restore;
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
/* Run the slave-specific initialization */
ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), &bus->i2c, &nv04_tv_encoder_info[type].dev);
if (ret < 0) goto fail_cleanup;
/* Attach it to the specified connector. */
get_slave_funcs(encoder)->create_resources(encoder, connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
fail_cleanup:
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
return ret;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,337 |
--- initial
+++ final
@@ -1,25 +1,25 @@
static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev, int index) {
struct drm_encoder *encoder;
struct drm_connector *connector;
/* add a new encoder */
encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
if (!encoder) return -ENOMEM;
encoder->possible_crtcs = 1 << index;
drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs, DRM_MODE_ENCODER_VIRTUAL, NULL);
drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
if (!connector) {
kfree(encoder);
return -ENOMEM;
}
/* add a new connector */
drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
drm_connector_register(connector);
/* link them */
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,276 |
--- initial
+++ final
@@ -1,4 +1,4 @@
void gma_connector_attach_encoder(struct gma_connector *connector, struct gma_encoder *encoder) {
connector->encoder = encoder;
- drm_mode_connector_attach_encoder(&connector->base, &encoder->base);
+ drm_connector_attach_encoder(&connector->base, &encoder->base);
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,295 |
--- initial
+++ final
@@ -1,16 +1,16 @@
struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, struct device_node *panel_node, struct drm_encoder *encoder) {
struct drm_connector *connector = NULL;
struct mdp4_lvds_connector *mdp4_lvds_connector;
mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL);
if (!mdp4_lvds_connector) return ERR_PTR(-ENOMEM);
mdp4_lvds_connector->encoder = encoder;
mdp4_lvds_connector->panel_node = panel_node;
connector = &mdp4_lvds_connector->base;
drm_connector_init(dev, connector, &mdp4_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
drm_connector_helper_add(connector, &mdp4_lvds_connector_helper_funcs);
connector->polled = 0;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return connector;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,305 |
--- initial
+++ final
@@ -1,19 +1,19 @@
static int hdmi_create_connector(struct drm_encoder *encoder) {
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
struct drm_connector *connector = &hdata->connector;
int ret;
connector->interlace_allowed = true;
connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(hdata->drm_dev, connector, &hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
if (ret) {
DRM_ERROR("Failed to initialize connector with drm\n");
return ret;
}
drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (hdata->bridge) {
ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
if (ret) DRM_ERROR("Failed to attach bridge\n");
}
return ret;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,293 |
--- initial
+++ final
@@ -1,31 +1,31 @@
int cirrus_modeset_init(struct cirrus_device *cdev) {
struct drm_encoder *encoder;
struct drm_connector *connector;
int ret;
drm_mode_config_init(cdev->dev);
cdev->mode_info.mode_config_initialized = true;
cdev->dev->mode_config.max_width = CIRRUS_MAX_FB_WIDTH;
cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
cdev->dev->mode_config.preferred_depth = 24;
/* don't prefer a shadow on virt GPU */
cdev->dev->mode_config.prefer_shadow = 0;
cirrus_crtc_init(cdev->dev);
encoder = cirrus_encoder_init(cdev->dev);
if (!encoder) {
DRM_ERROR("cirrus_encoder_init failed\n");
return -1;
}
connector = cirrus_vga_init(cdev->dev);
if (!connector) {
DRM_ERROR("cirrus_vga_init failed\n");
return -1;
}
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
ret = cirrus_fbdev_init(cdev);
if (ret) {
DRM_ERROR("cirrus_fbdev_init failed\n");
return ret;
}
return 0;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,274 |
--- initial
+++ final
@@ -1,68 +1,68 @@
static int sun4i_tv_bind(struct device *dev, struct device *master, void *data) {
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_tv *tv;
struct resource *res;
void __iomem *regs;
int ret;
tv = devm_kzalloc(dev, sizeof(*tv), GFP_KERNEL);
if (!tv) return -ENOMEM;
tv->drv = drv;
dev_set_drvdata(dev, tv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
if (IS_ERR(regs)) {
dev_err(dev, "Couldn't map the TV encoder registers\n");
return PTR_ERR(regs);
}
tv->regs = devm_regmap_init_mmio(dev, regs, &sun4i_tv_regmap_config);
if (IS_ERR(tv->regs)) {
dev_err(dev, "Couldn't create the TV encoder regmap\n");
return PTR_ERR(tv->regs);
}
tv->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(tv->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
return PTR_ERR(tv->reset);
}
ret = reset_control_deassert(tv->reset);
if (ret) {
dev_err(dev, "Couldn't deassert our reset line\n");
return ret;
}
tv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(tv->clk)) {
dev_err(dev, "Couldn't get the TV encoder clock\n");
ret = PTR_ERR(tv->clk);
goto err_assert_reset;
}
clk_prepare_enable(tv->clk);
drm_encoder_helper_add(&tv->encoder, &sun4i_tv_helper_funcs);
ret = drm_encoder_init(drm, &tv->encoder, &sun4i_tv_funcs, DRM_MODE_ENCODER_TVDAC, NULL);
if (ret) {
dev_err(dev, "Couldn't initialise the TV encoder\n");
goto err_disable_clk;
}
tv->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
if (!tv->encoder.possible_crtcs) {
ret = -EPROBE_DEFER;
goto err_disable_clk;
}
drm_connector_helper_add(&tv->connector, &sun4i_tv_comp_connector_helper_funcs);
ret = drm_connector_init(drm, &tv->connector, &sun4i_tv_comp_connector_funcs, DRM_MODE_CONNECTOR_Composite);
if (ret) {
dev_err(dev, "Couldn't initialise the Composite connector\n");
goto err_cleanup_connector;
}
tv->connector.interlace_allowed = true;
- drm_mode_connector_attach_encoder(&tv->connector, &tv->encoder);
+ drm_connector_attach_encoder(&tv->connector, &tv->encoder);
return 0;
err_cleanup_connector:
drm_encoder_cleanup(&tv->encoder);
err_disable_clk:
clk_disable_unprepare(tv->clk);
err_assert_reset:
reset_control_assert(tv->reset);
return ret;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,330 |
--- initial
+++ final
@@ -1,36 +1,36 @@
int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np) {
struct arcpgu_drm_connector *arcpgu_connector;
struct drm_encoder *encoder;
struct drm_connector *connector;
int ret;
encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
if (encoder == NULL) return -ENOMEM;
encoder->possible_crtcs = 1;
encoder->possible_clones = 0;
ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs, DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret) return ret;
arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector), GFP_KERNEL);
if (!arcpgu_connector) {
ret = -ENOMEM;
goto error_encoder_cleanup;
}
connector = &arcpgu_connector->connector;
drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
if (ret < 0) {
dev_err(drm->dev, "failed to initialize drm connector\n");
goto error_encoder_cleanup;
}
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
dev_err(drm->dev, "could not attach connector to encoder\n");
drm_connector_unregister(connector);
goto error_connector_cleanup;
}
return 0;
error_connector_cleanup:
drm_connector_cleanup(connector);
error_encoder_cleanup:
drm_encoder_cleanup(encoder);
return ret;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,270 |
--- initial
+++ final
@@ -1,60 +1,60 @@
struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, struct mdfld_dsi_connector *dsi_connector, const struct panel_funcs *p_funcs) {
struct mdfld_dsi_dpi_output *dpi_output = NULL;
struct mdfld_dsi_config *dsi_config;
struct drm_connector *connector = NULL;
struct drm_encoder *encoder = NULL;
int pipe;
u32 data;
int ret;
pipe = dsi_connector->pipe;
if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
dsi_config = mdfld_dsi_get_config(dsi_connector);
/* panel hard-reset */
if (p_funcs->reset) {
ret = p_funcs->reset(pipe);
if (ret) {
DRM_ERROR("Panel %d hard-reset failed\n", pipe);
return NULL;
}
}
/* panel drvIC init */
if (p_funcs->drv_ic_init) p_funcs->drv_ic_init(dsi_config, pipe);
/* panel power mode detect */
ret = mdfld_dsi_get_power_mode(dsi_config, &data, false);
if (ret) {
DRM_ERROR("Panel %d get power mode failed\n", pipe);
dsi_connector->status = connector_status_disconnected;
} else {
DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
dsi_connector->status = connector_status_connected;
}
}
dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
if (!dpi_output) {
DRM_ERROR("No memory\n");
return NULL;
}
dpi_output->panel_on = 0;
dpi_output->dev = dev;
if (mdfld_get_panel_type(dev, pipe) != TC35876X) dpi_output->p_funcs = p_funcs;
dpi_output->first_boot = 1;
/*get fixed mode*/
dsi_config = mdfld_dsi_get_config(dsi_connector);
/*create drm encoder object*/
connector = &dsi_connector->base.base;
encoder = &dpi_output->base.base.base;
drm_encoder_init(dev, encoder, p_funcs->encoder_funcs, DRM_MODE_ENCODER_LVDS, NULL);
drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs);
/*attach to given connector*/
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
/*set possible crtcs and clones*/
if (dsi_connector->pipe) {
encoder->possible_crtcs = (1 << 2);
encoder->possible_clones = (1 << 1);
} else {
encoder->possible_crtcs = (1 << 0);
encoder->possible_clones = (1 << 0);
}
dsi_connector->base.encoder = &dpi_output->base.base;
return &dpi_output->base;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,304 |
--- initial
+++ final
@@ -1,22 +1,22 @@
struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi) {
struct drm_connector *connector = NULL;
struct hdmi_connector *hdmi_connector;
int ret;
hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
if (!hdmi_connector) return ERR_PTR(-ENOMEM);
hdmi_connector->hdmi = hdmi;
INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work);
connector = &hdmi_connector->base;
drm_connector_init(hdmi->dev, connector, &hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &msm_hdmi_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
ret = hpd_enable(hdmi_connector);
if (ret) {
dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
return ERR_PTR(ret);
}
- drm_mode_connector_attach_encoder(connector, hdmi->encoder);
+ drm_connector_attach_encoder(connector, hdmi->encoder);
return connector;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,296 |
--- initial
+++ final
@@ -1,13 +1,13 @@
static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve) {
int encoder_type;
int ret;
encoder_type = tve->mode == TVE_MODE_VGA ? DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node);
if (ret) return ret;
drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs, encoder_type, NULL);
drm_connector_helper_add(&tve->connector, &imx_tve_connector_helper_funcs);
drm_connector_init(drm, &tve->connector, &imx_tve_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- drm_mode_connector_attach_encoder(&tve->connector, &tve->encoder);
+ drm_connector_attach_encoder(&tve->connector, &tve->encoder);
return 0;
}<sep>@@
expression e1,e2;
@@
- drm_mode_connector_attach_encoder
+ drm_connector_attach_encoder
(e1,e2)
<|end_of_text|> | 11,300 |
--- initial
+++ final
@@ -1,6 +1,6 @@
static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) {
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) return;
plane_atomic_disable(dc, pipe_ctx);
apply_DEGVIDCN10_253_wa(dc);
- dm_logger_write(dc->ctx->logger, LOG_DC, "Power down front end %d\n", pipe_ctx->pipe_idx);
+ DC_LOG_DC(dc->ctx->logger, "Power down front end %d\n", pipe_ctx->pipe_idx);
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_DC,
+ DC_LOG_DC(e1,
...)<|end_of_text|> | 10,802 |
--- initial
+++ final
@@ -1,39 +1,39 @@
static bool read_command(struct aux_engine *engine, struct i2caux_transaction_request *request, bool middle_of_transaction) {
struct read_command_context ctx;
ctx.buffer = request->payload.data;
ctx.current_read_length = request->payload.length;
ctx.offset = 0;
ctx.timed_out_retry_aux = 0;
ctx.invalid_reply_retry_aux = 0;
ctx.defer_retry_aux = 0;
ctx.defer_retry_i2c = 0;
ctx.invalid_reply_retry_aux_on_ack = 0;
ctx.transaction_complete = false;
ctx.operation_succeeded = true;
if (request->payload.address_space == I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
ctx.request.type = AUX_TRANSACTION_TYPE_DP;
ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
ctx.request.address = request->payload.address;
} else if (request->payload.address_space == I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
ctx.request.action = middle_of_transaction ? I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT : I2CAUX_TRANSACTION_ACTION_I2C_READ;
ctx.request.address = request->payload.address >> 1;
} else {
/* in DAL2, there was no return in such case */
BREAK_TO_DEBUGGER();
return false;
}
ctx.request.delay = 0;
do {
memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
ctx.request.data = ctx.buffer + ctx.offset;
ctx.request.length = ctx.current_read_length;
process_read_request(engine, &ctx);
request->status = ctx.status;
if (ctx.operation_succeeded && !ctx.transaction_complete)
if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C) msleep(engine->delay);
} while (ctx.operation_succeeded && !ctx.transaction_complete);
- if (request->payload.address_space == I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { dm_logger_write(engine->base.ctx->logger, LOG_I2C_AUX, "READ: addr:0x%x value:0x%x Result:%d", request->payload.address, request->payload.data[0], ctx.operation_succeeded); }
+ if (request->payload.address_space == I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { DC_LOG_I2C_AUX(engine->base.ctx->logger, "READ: addr:0x%x value:0x%x Result:%d", request->payload.address, request->payload.data[0], ctx.operation_succeeded); }
request->payload.length = ctx.reply.length;
return ctx.operation_succeeded;
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_I2C_AUX,
+ DC_LOG_I2C_AUX(e1,
...)
<|end_of_text|> | 10,729 |
--- initial
+++ final
@@ -1,43 +1,43 @@
static bool hpd_rx_irq_check_link_loss_status(struct dc_link *link, union hpd_irq_data *hpd_irq_dpcd_data) {
uint8_t irq_reg_rx_power_state = 0;
enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
union lane_status lane_status;
uint32_t lane;
bool sink_status_changed;
bool return_code;
sink_status_changed = false;
return_code = false;
if (link->cur_link_settings.lane_count == 0) return return_code;
/*1. Check that Link Status changed, before re-training.*/
/*parse lane status*/
for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
/* check status of lanes 0,1
* changed DpcdAddress_Lane01Status (0x202)
*/
lane_status.raw = get_nibble_at_index(&hpd_irq_dpcd_data->bytes.lane01_status.raw, lane);
if (!lane_status.bits.CHANNEL_EQ_DONE_0 || !lane_status.bits.CR_DONE_0 || !lane_status.bits.SYMBOL_LOCKED_0) {
/* if channel equalization, clock
* recovery or symbol lock is dropped,
* consider the link as dropped:
* the dp sink status has changed
*/
sink_status_changed = true;
break;
}
}
/* Check interlane align.*/
if (sink_status_changed || !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
- dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ, "%s: Link Status changed.\n", __func__);
+ DC_LOG_HW_HPD_IRQ(link->ctx->logger, "%s: Link Status changed.\n", __func__);
return_code = true;
/*2. Check that we can handle interrupt: Not in FS DOS,
* Not in "Display Timeout" state, Link is trained.
*/
dpcd_result = core_link_read_dpcd(link, DP_SET_POWER, &irq_reg_rx_power_state, sizeof(irq_reg_rx_power_state));
if (dpcd_result != DC_OK) {
- dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ, "%s: DPCD read failed to obtain power state.\n", __func__);
+ DC_LOG_HW_HPD_IRQ(link->ctx->logger, "%s: DPCD read failed to obtain power state.\n", __func__);
} else {
if (irq_reg_rx_power_state != DP_SET_POWER_D0) return_code = false;
}
}
return return_code;
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_HW_HPD_IRQ,
+ DC_LOG_HW_HPD_IRQ(e1,
...)
<|end_of_text|> | 10,751 |
--- initial
+++ final
@@ -1,47 +1,47 @@
static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) {
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
struct link_encoder *link_encoder = link->link_enc;
struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
struct dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
uint8_t i;
/* enable_link_dp_mst already checks link->enabled_stream_count
* and that stream is in link->stream[]. This is called during set mode,
* so stream_enc is available.
*/
/* calculate the VC payload for the stream: stream_alloc */
if (dm_helpers_dp_mst_write_payload_allocation_table(stream->ctx, stream, &proposed_table, true)) {
update_mst_stream_alloc_table(link, pipe_ctx->stream_res.stream_enc, &proposed_table);
} else
- dm_logger_write(link->ctx->logger, LOG_WARNING,
- "Failed to update"
- "MST allocation table for"
- "pipe idx:%d\n",
- pipe_ctx->pipe_idx);
- dm_logger_write(link->ctx->logger, LOG_MST,
- "%s "
- "stream_count: %d: \n ",
- __func__, link->mst_stream_alloc_table.stream_count);
+ DC_LOG_WARNING(link->ctx->logger,
+ "Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+ DC_LOG_MST(link->ctx->logger,
+ "%s "
+ "stream_count: %d: \n ",
+ __func__, link->mst_stream_alloc_table.stream_count);
for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- dm_logger_write(link->ctx->logger, LOG_MST,
- "stream_enc[%d]: 0x%x "
- "stream[%d].vcp_id: %d "
- "stream[%d].slot_count: %d\n",
- i, link->mst_stream_alloc_table.stream_allocations[i].stream_enc, i, link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ DC_LOG_MST(link->ctx->logger,
+ "stream_enc[%d]: 0x%x "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i, link->mst_stream_alloc_table.stream_allocations[i].stream_enc, i, link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count);
}
ASSERT(proposed_table.stream_count > 0);
/* program DP source TX for payload */
link_encoder->funcs->update_mst_stream_allocation_table(link_encoder, &link->mst_stream_alloc_table);
/* send down message */
dm_helpers_dp_mst_poll_for_allocation_change_trigger(stream->ctx, stream);
dm_helpers_dp_mst_send_payload_allocation(stream->ctx, stream, true);
/* slot X.Y for only current stream */
pbn_per_slot = get_pbn_per_slot(stream);
pbn = get_pbn_from_timing(pipe_ctx);
avg_time_slots_per_mtp = dal_fixed31_32_div(pbn, pbn_per_slot);
stream_encoder->funcs->set_mst_bandwidth(stream_encoder, avg_time_slots_per_mtp);
return DC_OK;
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_MST,
+ DC_LOG_MST(e1,
...)
@@
expression e1;
@@
- dm_logger_write(e1,LOG_WARNING,
+ DC_LOG_WARNING(e1,
...)
<|end_of_text|> | 10,739 |
--- initial
+++ final
@@ -1,15 +1,15 @@
static void dpcd_set_link_settings(struct dc_link *link, const struct link_training_settings *lt_settings) {
uint8_t rate = (uint8_t)(lt_settings->link_settings.link_rate);
union down_spread_ctrl downspread = {{0}};
union lane_count_set lane_count_set = {{0}};
uint8_t link_set_buffer[2];
downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
lane_count_set.bits.ENHANCED_FRAMING = 1;
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
link_set_buffer[0] = rate;
link_set_buffer[1] = lane_count_set.raw;
core_link_write_dpcd(link, DP_LINK_BW_SET, link_set_buffer, 2);
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, &downspread.raw, sizeof(downspread));
- dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING, "%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n", __func__, DP_LINK_BW_SET, lt_settings->link_settings.link_rate, DP_LANE_COUNT_SET, lt_settings->link_settings.lane_count, DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread);
+ DC_LOG_HW_LINK_TRAINING(link->ctx->logger, "%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n", __func__, DP_LINK_BW_SET, lt_settings->link_settings.link_rate, DP_LANE_COUNT_SET, lt_settings->link_settings.lane_count, DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread);
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_HW_LINK_TRAINING,
+ DC_LOG_HW_LINK_TRAINING(e1,
...)
<|end_of_text|> | 10,747 |
--- initial
+++ final
@@ -1,47 +1,47 @@
static enum bp_result set_pixel_clock_v7(struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params) {
enum bp_result result = BP_RESULT_FAILURE;
struct set_pixel_clock_parameter_v1_7 clk;
uint8_t controller_id;
uint32_t pll_id;
memset(&clk, 0, sizeof(clk));
if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id) && bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &controller_id)) {
/* Note: VBIOS still wants to use ucCRTC name which is now
* 1 byte in ULONG
*typedef struct _CRTC_PIXEL_CLOCK_FREQ
*{
* target the pixel clock to drive the CRTC timing.
* ULONG ulPixelClock:24;
* 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to
* previous version.
* ATOM_CRTC1~6, indicate the CRTC controller to
* ULONG ucCRTC:8;
* drive the pixel clock. not used for DCPLL case.
*}CRTC_PIXEL_CLOCK_FREQ;
*union
*{
* pixel clock and CRTC id frequency
* CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
* ULONG ulDispEngClkFreq; dispclk frequency
*};
*/
clk.crtc_id = controller_id;
clk.pll_id = (uint8_t)pll_id;
clk.encoderobjid = bp->cmd_helper->encoder_id_to_atom(dal_graphics_object_id_get_encoder_id(bp_params->encoder_object_id));
clk.encoder_mode = (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(bp_params->signal_type, false);
/* We need to convert from KHz units into 100Hz units */
clk.pixclk_100hz = cpu_to_le32(bp_params->target_pixel_clock * 10);
clk.deep_color_ratio = (uint8_t)bp->cmd_helper->transmitter_color_depth_to_atom(bp_params->color_depth);
- dm_logger_write(bp->base.ctx->logger, LOG_BIOS,
- "%s:program display clock = %d"
- "colorDepth = %d\n",
- __func__, bp_params->target_pixel_clock, bp_params->color_depth);
+ DC_LOG_BIOS(bp->base.ctx->logger,
+ "%s:program display clock = %d"
+ "colorDepth = %d\n",
+ __func__, bp_params->target_pixel_clock, bp_params->color_depth);
if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL;
if (bp_params->flags.PROGRAM_PHY_PLL_ONLY) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_PROG_PHYPLL;
if (bp_params->flags.SUPPORT_YUV_420) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_YUV420_MODE;
if (bp_params->flags.SET_XTALIN_REF_SRC) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN;
if (bp_params->flags.SET_GENLOCK_REF_DIV_SRC) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK;
if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk)) result = BP_RESULT_OK;
}
return result;
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_BIOS,
+ DC_LOG_BIOS(e1,
...)
<|end_of_text|> | 10,733 |
--- initial
+++ final
@@ -1,46 +1,46 @@
static void dce110_se_setup_hdmi_audio(struct stream_encoder *enc, const struct audio_crtc_info *crtc_info) {
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
struct audio_clock_info audio_clock_info = {0};
uint32_t max_packets_per_line;
/* For now still do the calculation, although this field is ignored when
HDMI_PACKET_GEN_VERSION above is set to 1 */
max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
/* HDMI_AUDIO_PACKET_CONTROL */
REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line, HDMI_AUDIO_DELAY_EN, 1);
/* AFMT_AUDIO_PACKET_CONTROL */
REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
/* AFMT_AUDIO_PACKET_CONTROL2 */
REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, 0, AFMT_60958_OSF_OVRD, 0);
/* HDMI_ACR_PACKET_CONTROL */
REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1, HDMI_ACR_SOURCE, 0, HDMI_ACR_AUDIO_PRIORITY, 0);
/* Program audio clock sample/regeneration parameters */
get_audio_clock_info(crtc_info->color_depth, crtc_info->requested_pixel_clock, crtc_info->calculated_pixel_clock, &audio_clock_info);
- dm_logger_write(enc->ctx->logger, LOG_HW_AUDIO,
+ DC_LOG_HW_AUDIO(enc->ctx->logger,
"\n%s:Input::requested_pixel_clock = %d"
"calculated_pixel_clock = %d \n",
__func__, crtc_info->requested_pixel_clock, crtc_info->calculated_pixel_clock);
/* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
/* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */
REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz);
/* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */
REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz);
/* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */
REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz);
/* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */
REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz);
/* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */
REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz);
/* The video driver cannot know in advance which sample rate will
be used by the HD Audio driver;
the HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is
programmed below in the interrupt callback */
/* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK &
AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
REG_UPDATE_2(AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1, AFMT_60958_CS_CLOCK_ACCURACY, 0);
/* AFMT_60958_1 AFMT_60958_CS_CHANNEL_NUMBER_R */
REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
/*AFMT_60958_2: keep these settings until
* the programming guide comes out*/
REG_UPDATE_6(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3, AFMT_60958_CS_CHANNEL_NUMBER_3, 4, AFMT_60958_CS_CHANNEL_NUMBER_4, 5, AFMT_60958_CS_CHANNEL_NUMBER_5, 6, AFMT_60958_CS_CHANNEL_NUMBER_6, 7, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_HW_AUDIO,
+ DC_LOG_HW_AUDIO(e1,
...)
<|end_of_text|> | 10,794 |
--- initial
+++ final
@@ -1,10 +1,10 @@
static bool dce_abm_set_backlight_level(struct abm *abm, unsigned int backlight_level, unsigned int frame_ramp, unsigned int controller_id, bool use_smooth_brightness) {
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- dm_logger_write(abm->ctx->logger, LOG_BACKLIGHT, "New Backlight level: %d (0x%X)\n", backlight_level, backlight_level);
+ DC_LOG_BACKLIGHT(abm->ctx->logger, "New Backlight level: %d (0x%X)\n", backlight_level, backlight_level);
/* If DMCU is in reset state, DMCU is uninitialized */
if (use_smooth_brightness)
dmcu_set_backlight_level(abm_dce, backlight_level, frame_ramp, controller_id);
else
driver_set_backlight_level(abm_dce, backlight_level);
return true;
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_BACKLIGHT,
+ DC_LOG_BACKLIGHT(e1,
...)
<|end_of_text|> | 10,776 |
--- initial
+++ final
@@ -1,29 +1,29 @@
static bool dce110_validate_bandwidth(struct dc *dc, struct dc_state *context) {
bool result = false;
- dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS, "%s: start", __func__);
+ DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, "%s: start", __func__);
if (bw_calcs(dc->ctx, dc->bw_dceip, dc->bw_vbios, context->res_ctx.pipe_ctx, dc->res_pool->pipe_count, &context->bw.dce)) result = true;
- if (!result) dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION, "%s: %dx%d@%d Bandwidth validation failed!\n", __func__, context->streams[0]->timing.h_addressable, context->streams[0]->timing.v_addressable, context->streams[0]->timing.pix_clk_khz);
+ if (!result) DC_LOG_BANDWIDTH_VALIDATION(dc->ctx->logger, "%s: %dx%d@%d Bandwidth validation failed!\n", __func__, context->streams[0]->timing.h_addressable, context->streams[0]->timing.v_addressable, context->streams[0]->timing.pix_clk_khz);
if (memcmp(&dc->current_state->bw.dce, &context->bw.dce, sizeof(context->bw.dce))) {
struct log_entry log_entry;
dm_logger_open(dc->ctx->logger, &log_entry, LOG_BANDWIDTH_CALCS);
dm_logger_append(&log_entry,
"%s: finish,\n"
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
"stutMark_b: %d stutMark_a: %d\n",
__func__, context->bw.dce.nbp_state_change_wm_ns[0].b_mark, context->bw.dce.nbp_state_change_wm_ns[0].a_mark, context->bw.dce.urgent_wm_ns[0].b_mark, context->bw.dce.urgent_wm_ns[0].a_mark, context->bw.dce.stutter_exit_wm_ns[0].b_mark, context->bw.dce.stutter_exit_wm_ns[0].a_mark);
dm_logger_append(&log_entry,
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
"stutMark_b: %d stutMark_a: %d\n",
context->bw.dce.nbp_state_change_wm_ns[1].b_mark, context->bw.dce.nbp_state_change_wm_ns[1].a_mark, context->bw.dce.urgent_wm_ns[1].b_mark, context->bw.dce.urgent_wm_ns[1].a_mark, context->bw.dce.stutter_exit_wm_ns[1].b_mark, context->bw.dce.stutter_exit_wm_ns[1].a_mark);
dm_logger_append(&log_entry,
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
"stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
context->bw.dce.nbp_state_change_wm_ns[2].b_mark, context->bw.dce.nbp_state_change_wm_ns[2].a_mark, context->bw.dce.urgent_wm_ns[2].b_mark, context->bw.dce.urgent_wm_ns[2].a_mark, context->bw.dce.stutter_exit_wm_ns[2].b_mark, context->bw.dce.stutter_exit_wm_ns[2].a_mark, context->bw.dce.stutter_mode_enable);
dm_logger_append(&log_entry,
"cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
context->bw.dce.cpuc_state_change_enable, context->bw.dce.cpup_state_change_enable, context->bw.dce.nbp_state_change_enable, context->bw.dce.all_displays_in_sync, context->bw.dce.dispclk_khz, context->bw.dce.sclk_khz, context->bw.dce.sclk_deep_sleep_khz, context->bw.dce.yclk_khz, context->bw.dce.blackout_recovery_time_us);
dm_logger_close(&log_entry);
}
return result;
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_BANDWIDTH_VALIDATION,
+ DC_LOG_BANDWIDTH_VALIDATION(e1,
...)
@@
expression e1;
@@
- dm_logger_write(e1,LOG_BANDWIDTH_CALCS,
+ DC_LOG_BANDWIDTH_CALCS(e1,
...)
<|end_of_text|> | 10,764 |
--- initial
+++ final
@@ -1,24 +1,24 @@
void hwss_edp_power_control(struct dc_link *link, bool power_up) {
struct dc_context *ctx = link->ctx;
struct dce_hwseq *hwseq = ctx->dc->hwseq;
struct bp_transmitter_control cntl = {0};
enum bp_result bp_result;
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector) != CONNECTOR_ID_EDP) {
BREAK_TO_DEBUGGER();
return;
}
if (power_up != is_panel_powered_on(hwseq)) {
/* Send VBIOS command to prompt eDP panel power */
- dm_logger_write(ctx->logger, LOG_HW_RESUME_S3, "%s: Panel Power action: %s\n", __func__, (power_up ? "On" : "Off"));
+ DC_LOG_HW_RESUME_S3(ctx->logger, "%s: Panel Power action: %s\n", __func__, (power_up ? "On" : "Off"));
cntl.action = power_up ? TRANSMITTER_CONTROL_POWER_ON : TRANSMITTER_CONTROL_POWER_OFF;
cntl.transmitter = link->link_enc->transmitter;
cntl.connector_obj_id = link->link_enc->connector;
cntl.coherent = false;
cntl.lanes_number = LANE_COUNT_FOUR;
cntl.hpd_sel = link->link_enc->hpd_source;
bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
- if (bp_result != BP_RESULT_OK) dm_logger_write(ctx->logger, LOG_ERROR, "%s: Panel Power bp_result: %d\n", __func__, bp_result);
+ if (bp_result != BP_RESULT_OK) DC_LOG_ERROR(ctx->logger, "%s: Panel Power bp_result: %d\n", __func__, bp_result);
} else {
- dm_logger_write(ctx->logger, LOG_HW_RESUME_S3, "%s: Skipping Panel Power action: %s\n", __func__, (power_up ? "On" : "Off"));
+ DC_LOG_HW_RESUME_S3(ctx->logger, "%s: Skipping Panel Power action: %s\n", __func__, (power_up ? "On" : "Off"));
}
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_HW_RESUME_S3,
+ DC_LOG_HW_RESUME_S3(e1,
...)
@@
expression e1;
@@
- dm_logger_write(e1,LOG_ERROR,
+ DC_LOG_ERROR(e1,
...)
<|end_of_text|> | 10,762 |
--- initial
+++ final
@@ -1,38 +1,38 @@
static void reset_back_end_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) {
int i;
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
return;
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
/* DPMS may already disable */
if (!pipe_ctx->stream->dpms_off)
core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
else if (pipe_ctx->stream_res.audio) {
/*
* if stream is already disabled outside of commit streams path,
* audio disable was skipped. Need to do it here
*/
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
if (dc->caps.dynamic_audio == true) {
/*we have to dynamically arbitrate the audio endpoints*/
pipe_ctx->stream_res.audio = NULL;
/*we free the resource, need to reset is_audio_acquired*/
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
}
}
}
/* By the upper caller loop, the parent pipe (pipe0) will be reset last.
* The back end is shared by all pipes and will be disabled only when the
* parent pipe is disabled.
*/
if (pipe_ctx->top_pipe == NULL) {
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx) break;
if (i == dc->res_pool->pipe_count) return;
pipe_ctx->stream = NULL;
- dm_logger_write(dc->ctx->logger, LOG_DEBUG, "Reset back end for pipe %d, tg:%d\n", pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
+ DC_LOG_DEBUG(dc->ctx->logger, "Reset back end for pipe %d, tg:%d\n", pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_DEBUG,
+ DC_LOG_DEBUG(e1,
...)
<|end_of_text|> | 10,805 |
--- initial
+++ final
@@ -1,10 +1,10 @@
void dce_aud_az_enable(struct audio *audio) {
struct dce_audio *aud = DCE_AUD(audio);
uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
set_reg_field_value(value, 1, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, CLOCK_GATING_DISABLE);
set_reg_field_value(value, 1, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, AUDIO_ENABLED);
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
set_reg_field_value(value, 0, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, CLOCK_GATING_DISABLE);
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
- dm_logger_write(CTX->logger, LOG_HW_AUDIO, "\n\t========= AUDIO:dce_aud_az_enable: index: %u data: 0x%x\n", audio->inst, value);
+ DC_LOG_HW_AUDIO(CTX->logger, "\n\t========= AUDIO:dce_aud_az_enable: index: %u data: 0x%x\n", audio->inst, value);
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_HW_AUDIO,
+ DC_LOG_HW_AUDIO(e1,
...)
<|end_of_text|> | 10,778 |
--- initial
+++ final
@@ -1,16 +1,16 @@
static void wait_for_fbc_state_changed(struct dce110_compressor *cp110, bool enabled) {
uint8_t counter = 0;
uint32_t addr = mmFBC_STATUS;
uint32_t value;
while (counter < 10) {
value = dm_read_reg(cp110->base.ctx, addr);
if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS) == enabled) break;
msleep(10);
counter++;
}
if (counter == 10) {
- dm_logger_write(cp110->base.ctx->logger, LOG_WARNING, "%s: wait counter exceeded, changes to HW not applied", __func__);
+ DC_LOG_WARNING(cp110->base.ctx->logger, "%s: wait counter exceeded, changes to HW not applied", __func__);
} else {
- dm_logger_write(cp110->base.ctx->logger, LOG_SYNC, "FBC status changed to %d", enabled);
+ DC_LOG_SYNC(cp110->base.ctx->logger, "FBC status changed to %d", enabled);
}
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_SYNC,
+ DC_LOG_SYNC(e1,
...)
@@
expression e1;
@@
- dm_logger_write(e1,LOG_WARNING,
+ DC_LOG_WARNING(e1,
...)
<|end_of_text|> | 10,759 |
--- initial
+++ final
@@ -1,41 +1,41 @@
bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) {
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct view recout_skip = {0};
bool res = false;
/* Important: scaling ratio calculation requires pixel format,
* lb depth calculation requires recout and taps require scaling ratios.
* Inits require viewport, taps, ratios and recout of split pipe
*/
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(pipe_ctx->plane_state->format);
calculate_scaling_ratios(pipe_ctx);
calculate_viewport(pipe_ctx);
if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16) return false;
calculate_recout(pipe_ctx, &recout_skip);
/**
* Setting line buffer pixel depth to 24bpp yields banding
* on certain displays, such as the Sharp 4k
*/
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
/* Taps calculations */
if (pipe_ctx->plane_res.xfm != NULL) res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
if (pipe_ctx->plane_res.dpp != NULL) res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
if (!res) {
/* Try 24 bpp linebuffer */
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
if (pipe_ctx->plane_res.xfm != NULL) res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
if (pipe_ctx->plane_res.dpp != NULL) res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
}
if (res) /* May need to re-check lb size after this in some obscure scenario */
calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);
- dm_logger_write(pipe_ctx->stream->ctx->logger, LOG_SCALER,
- "%s: Viewport:\nheight:%d width:%d x:%d "
- "y:%d\n dst_rect:\nheight:%d width:%d x:%d "
- "y:%d\n",
- __func__, pipe_ctx->plane_res.scl_data.viewport.height, pipe_ctx->plane_res.scl_data.viewport.width, pipe_ctx->plane_res.scl_data.viewport.x, pipe_ctx->plane_res.scl_data.viewport.y, plane_state->dst_rect.height, plane_state->dst_rect.width, plane_state->dst_rect.x, plane_state->dst_rect.y);
+ DC_LOG_SCALER(pipe_ctx->stream->ctx->logger,
+ "%s: Viewport:\nheight:%d width:%d x:%d "
+ "y:%d\n dst_rect:\nheight:%d width:%d x:%d "
+ "y:%d\n",
+ __func__, pipe_ctx->plane_res.scl_data.viewport.height, pipe_ctx->plane_res.scl_data.viewport.width, pipe_ctx->plane_res.scl_data.viewport.x, pipe_ctx->plane_res.scl_data.viewport.y, plane_state->dst_rect.height, plane_state->dst_rect.width, plane_state->dst_rect.x, plane_state->dst_rect.y);
return res;
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_SCALER,
+ DC_LOG_SCALER(e1,
...)
<|end_of_text|> | 10,757 |
--- initial
+++ final
@@ -1,20 +1,20 @@
static enum bp_result transmitter_control_v1_6(struct bios_parser *bp, struct bp_transmitter_control *cntl) {
enum bp_result result = BP_RESULT_FAILURE;
const struct command_table_helper *cmd = bp->cmd_helper;
struct dig_transmitter_control_ps_allocation_v1_6 ps = {{0}};
ps.param.phyid = cmd->phy_id_to_atom(cntl->transmitter);
ps.param.action = (uint8_t)cntl->action;
if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS)
ps.param.mode_laneset.dplaneset = (uint8_t)cntl->lane_settings;
else
ps.param.mode_laneset.digmode = cmd->signal_type_to_atom_dig_mode(cntl->signal);
ps.param.lanenum = (uint8_t)cntl->lanes_number;
ps.param.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
ps.param.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
ps.param.connobj_id = (uint8_t)cntl->connector_obj_id.id;
ps.param.symclk_10khz = cntl->pixel_clock / 10;
- if (cntl->action == TRANSMITTER_CONTROL_ENABLE || cntl->action == TRANSMITTER_CONTROL_ACTIAVATE || cntl->action == TRANSMITTER_CONTROL_DEACTIVATE) { dm_logger_write(bp->base.ctx->logger, LOG_BIOS, "%s:ps.param.symclk_10khz = %d\n", __func__, ps.param.symclk_10khz); }
+ if (cntl->action == TRANSMITTER_CONTROL_ENABLE || cntl->action == TRANSMITTER_CONTROL_ACTIAVATE || cntl->action == TRANSMITTER_CONTROL_DEACTIVATE) { DC_LOG_BIOS(bp->base.ctx->logger, "%s:ps.param.symclk_10khz = %d\n", __func__, ps.param.symclk_10khz); }
/*color_depth not used any more, driver has deep color factor in the Phyclk*/
if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps)) result = BP_RESULT_OK;
return result;
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_BIOS,
+ DC_LOG_BIOS(e1,
...)
<|end_of_text|> | 10,734 |
--- initial
+++ final
@@ -1,54 +1,54 @@
static void dce110_program_front_end_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx) {
struct mem_input *mi = pipe_ctx->plane_res.mi;
struct pipe_ctx *old_pipe = NULL;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
unsigned int i;
memset(&tbl_entry, 0, sizeof(tbl_entry));
if (dc->current_state) old_pipe = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
memset(&adjust, 0, sizeof(adjust));
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
dce_enable_fe_clock(dc->hwseq, mi->inst, true);
set_default_colors(pipe_ctx);
if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
tbl_entry.color_space = pipe_ctx->stream->output_color_space;
for (i = 0; i < 12; i++)
tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.xfm, &tbl_entry);
}
if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
adjust.temperature_matrix[i] = pipe_ctx->stream->gamut_remap_matrix.matrix[i];
}
pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
program_scaler(dc, pipe_ctx);
#if defined(CONFIG_DRM_AMD_DC_FBC)
if (dc->fbc_compressor && old_pipe->stream) {
if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
else
enable_fbc(dc, dc->current_state);
}
#endif
mi->funcs->mem_input_program_surface_config(mi, plane_state->format, &plane_state->tiling_info, &plane_state->plane_size, plane_state->rotation, NULL, false);
if (mi->funcs->set_blank) mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible);
if (dc->config.gpu_vm_support) mi->funcs->mem_input_program_pte_vm(pipe_ctx->plane_res.mi, plane_state->format, &plane_state->tiling_info, plane_state->rotation);
/* Moved programming gamma from dc to hwss */
if (pipe_ctx->plane_state->update_flags.bits.full_update || pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change) dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
if (pipe_ctx->plane_state->update_flags.bits.full_update) dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
- dm_logger_write(dc->ctx->logger, LOG_SURFACE,
- "Pipe:%d 0x%x: addr hi:0x%x, "
- "addr low:0x%x, "
- "src: %d, %d, %d,"
- " %d; dst: %d, %d, %d, %d;"
- "clip: %d, %d, %d, %d\n",
- pipe_ctx->pipe_idx, pipe_ctx->plane_state, pipe_ctx->plane_state->address.grph.addr.high_part, pipe_ctx->plane_state->address.grph.addr.low_part, pipe_ctx->plane_state->src_rect.x, pipe_ctx->plane_state->src_rect.y, pipe_ctx->plane_state->src_rect.width, pipe_ctx->plane_state->src_rect.height, pipe_ctx->plane_state->dst_rect.x, pipe_ctx->plane_state->dst_rect.y, pipe_ctx->plane_state->dst_rect.width, pipe_ctx->plane_state->dst_rect.height, pipe_ctx->plane_state->clip_rect.x, pipe_ctx->plane_state->clip_rect.y, pipe_ctx->plane_state->clip_rect.width, pipe_ctx->plane_state->clip_rect.height);
- dm_logger_write(dc->ctx->logger, LOG_SURFACE,
- "Pipe %d: width, height, x, y\n"
- "viewport:%d, %d, %d, %d\n"
- "recout: %d, %d, %d, %d\n",
- pipe_ctx->pipe_idx, pipe_ctx->plane_res.scl_data.viewport.width, pipe_ctx->plane_res.scl_data.viewport.height, pipe_ctx->plane_res.scl_data.viewport.x, pipe_ctx->plane_res.scl_data.viewport.y, pipe_ctx->plane_res.scl_data.recout.width, pipe_ctx->plane_res.scl_data.recout.height, pipe_ctx->plane_res.scl_data.recout.x, pipe_ctx->plane_res.scl_data.recout.y);
+ DC_LOG_SURFACE(dc->ctx->logger,
+ "Pipe:%d 0x%x: addr hi:0x%x, "
+ "addr low:0x%x, "
+ "src: %d, %d, %d,"
+ " %d; dst: %d, %d, %d, %d;"
+ "clip: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx, pipe_ctx->plane_state, pipe_ctx->plane_state->address.grph.addr.high_part, pipe_ctx->plane_state->address.grph.addr.low_part, pipe_ctx->plane_state->src_rect.x, pipe_ctx->plane_state->src_rect.y, pipe_ctx->plane_state->src_rect.width, pipe_ctx->plane_state->src_rect.height, pipe_ctx->plane_state->dst_rect.x, pipe_ctx->plane_state->dst_rect.y, pipe_ctx->plane_state->dst_rect.width, pipe_ctx->plane_state->dst_rect.height, pipe_ctx->plane_state->clip_rect.x, pipe_ctx->plane_state->clip_rect.y, pipe_ctx->plane_state->clip_rect.width, pipe_ctx->plane_state->clip_rect.height);
+ DC_LOG_SURFACE(dc->ctx->logger,
+ "Pipe %d: width, height, x, y\n"
+ "viewport:%d, %d, %d, %d\n"
+ "recout: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx, pipe_ctx->plane_res.scl_data.viewport.width, pipe_ctx->plane_res.scl_data.viewport.height, pipe_ctx->plane_res.scl_data.viewport.x, pipe_ctx->plane_res.scl_data.viewport.y, pipe_ctx->plane_res.scl_data.recout.width, pipe_ctx->plane_res.scl_data.recout.height, pipe_ctx->plane_res.scl_data.recout.x, pipe_ctx->plane_res.scl_data.recout.y);
}<sep>@@
expression e1;
@@
- dm_logger_write(e1,LOG_SURFACE,
+ DC_LOG_SURFACE(e1,
...)
<|end_of_text|> | 10,760 |
--- initial
+++ final
@@ -1,9 +1,9 @@
static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) {
struct se_lun_acl *lun_acl;
/*
* For nacl->dynamic_node_acl=1
*/
- lun_acl = rcu_dereference_check(se_deve->se_lun_acl, atomic_read(&se_deve->pr_kref.refcount) != 0);
+ lun_acl = rcu_dereference_check(se_deve->se_lun_acl, kref_read(&se_deve->pr_kref) != 0);
if (!lun_acl) return 0;
return target_depend_item(&lun_acl->se_lun_group.cg_item);
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,947 |
--- initial
+++ final
@@ -1,40 +1,40 @@
static void chan_close_cb(struct l2cap_chan *chan) {
struct lowpan_btle_dev *entry;
struct lowpan_btle_dev *dev = NULL;
struct lowpan_peer *peer;
int err = -ENOENT;
bool last = false, remove = true;
BT_DBG("chan %p conn %p", chan, chan->conn);
if (chan->conn && chan->conn->hcon) {
if (!is_bt_6lowpan(chan->conn->hcon)) return;
/* If conn is set, then the netdev is also there and we should
* not remove it.
*/
remove = false;
}
spin_lock(&devices_lock);
list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
dev = lowpan_btle_dev(entry->netdev);
peer = __peer_lookup_chan(dev, chan);
if (peer) {
last = peer_del(dev, peer);
err = 0;
BT_DBG("dev %p removing %speer %p", dev, last ? "last " : "1 ", peer);
- BT_DBG("chan %p orig refcnt %d", chan, atomic_read(&chan->kref.refcount));
+ BT_DBG("chan %p orig refcnt %d", chan, kref_read(&chan->kref));
l2cap_chan_put(chan);
break;
}
}
if (!err && last && dev && !atomic_read(&dev->peer_count)) {
spin_unlock(&devices_lock);
cancel_delayed_work_sync(&dev->notify_peers);
ifdown(dev->netdev);
if (remove) {
INIT_WORK(&entry->delete_netdev, delete_netdev);
schedule_work(&entry->delete_netdev);
}
} else {
spin_unlock(&devices_lock);
}
return;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,874 |
--- initial
+++ final
@@ -1,11 +1,11 @@
static void pnv_php_detach_device_nodes(struct device_node *parent) {
struct device_node *dn;
int refcount;
for_each_child_of_node(parent, dn) {
pnv_php_detach_device_nodes(dn);
of_node_put(dn);
- refcount = atomic_read(&dn->kobj.kref.refcount);
+ refcount = kref_read(&dn->kobj.kref);
if (refcount != 1) pr_warn("Invalid refcount %d on <%s>\n", refcount, of_node_full_name(dn));
of_detach_node(dn);
}
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,932 |
--- initial
+++ final
@@ -1,124 +1,124 @@
static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int flags, int *call_ast, int master_node) {
enum dlm_status status;
int actions = 0;
int in_use;
u8 owner;
mlog(0, "master_node = %d, valblk = %d\n", master_node, flags & LKM_VALBLK);
if (master_node)
BUG_ON(res->owner != dlm->node_num);
else
BUG_ON(res->owner == dlm->node_num);
spin_lock(&dlm->ast_lock);
/* We want to be sure that we're not freeing a lock
* that still has AST's pending... */
in_use = !list_empty(&lock->ast_list);
spin_unlock(&dlm->ast_lock);
if (in_use && !(flags & LKM_CANCEL)) {
mlog(ML_ERROR,
"lockres %.*s: Someone is calling dlmunlock "
"while waiting for an ast!",
res->lockname.len, res->lockname.name);
return DLM_BADPARAM;
}
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
if (master_node && !(flags & LKM_CANCEL)) {
mlog(ML_ERROR, "lockres in progress!\n");
spin_unlock(&res->spinlock);
return DLM_FORWARD;
}
/* ok for this to sleep if not in a network handler */
__dlm_wait_on_lockres(res);
res->state |= DLM_LOCK_RES_IN_PROGRESS;
}
spin_lock(&lock->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
status = DLM_RECOVERING;
goto leave;
}
if (res->state & DLM_LOCK_RES_MIGRATING) {
status = DLM_MIGRATING;
goto leave;
}
/* see above for what the spec says about
* LKM_CANCEL and the lock queue state */
if (flags & LKM_CANCEL)
status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions);
else
status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);
if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node)) goto leave;
/* By now this has been masked out of cancel requests. */
if (flags & LKM_VALBLK) {
/* make the final update to the lvb */
if (master_node)
memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
else
flags |= LKM_PUT_LVB; /* let the send function
* handle it. */
}
if (!master_node) {
owner = res->owner;
/* drop locks and send message */
if (flags & LKM_CANCEL)
lock->cancel_pending = 1;
else
lock->unlock_pending = 1;
spin_unlock(&lock->spinlock);
spin_unlock(&res->spinlock);
status = dlm_send_remote_unlock_request(dlm, res, lock, lksb, flags, owner);
spin_lock(&res->spinlock);
spin_lock(&lock->spinlock);
/* if the master told us the lock was already granted,
* let the ast handle all of these actions */
if (status == DLM_CANCELGRANT) {
actions &= ~(DLM_UNLOCK_REMOVE_LOCK | DLM_UNLOCK_REGRANT_LOCK | DLM_UNLOCK_CLEAR_CONVERT_TYPE);
} else if (status == DLM_RECOVERING || status == DLM_MIGRATING || status == DLM_FORWARD || status == DLM_NOLOCKMGR) {
/* must clear the actions because this unlock
* is about to be retried. cannot free or do
* any list manipulation. */
mlog(0, "%s:%.*s: clearing actions, %s\n", dlm->name, res->lockname.len, res->lockname.name, status == DLM_RECOVERING ? "recovering" : (status == DLM_MIGRATING ? "migrating" : (status == DLM_FORWARD ? "forward" : "nolockmanager")));
actions = 0;
}
if (flags & LKM_CANCEL)
lock->cancel_pending = 0;
else
lock->unlock_pending = 0;
}
/* get an extra ref on lock. if we are just switching
* lists here, we dont want the lock to go away. */
dlm_lock_get(lock);
if (actions & DLM_UNLOCK_REMOVE_LOCK) {
list_del_init(&lock->list);
dlm_lock_put(lock);
}
if (actions & DLM_UNLOCK_REGRANT_LOCK) {
dlm_lock_get(lock);
list_add_tail(&lock->list, &res->granted);
}
if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) {
mlog(0, "clearing convert_type at %smaster node\n", master_node ? "" : "non-");
lock->ml.convert_type = LKM_IVMODE;
}
/* remove the extra ref on lock */
dlm_lock_put(lock);
leave:
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
if (!dlm_lock_on_list(&res->converting, lock))
BUG_ON(lock->ml.convert_type != LKM_IVMODE);
else
BUG_ON(lock->ml.convert_type == LKM_IVMODE);
spin_unlock(&lock->spinlock);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
/* let the caller's final dlm_lock_put handle the actual kfree */
if (actions & DLM_UNLOCK_FREE_LOCK) {
/* this should always be coupled with list removal */
BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK));
- mlog(0, "lock %u:%llu should be gone now! refs=%d\n", dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), atomic_read(&lock->lock_refs.refcount) - 1);
+ mlog(0, "lock %u:%llu should be gone now! refs=%d\n", dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), kref_read(&lock->lock_refs) - 1);
dlm_lock_put(lock);
}
if (actions & DLM_UNLOCK_CALL_AST) *call_ast = 1;
/* if cancel or unlock succeeded, lvb work is done */
if (status == DLM_NORMAL) lksb->flags &= ~(DLM_LKSB_PUT_LVB | DLM_LKSB_GET_LVB);
return status;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,896 |
--- initial
+++ final
@@ -1,33 +1,33 @@
static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf, size_t cnt, loff_t *ppos) {
struct mei_device *dev = fp->private_data;
struct mei_me_client *me_cl;
size_t bufsz = 1;
char *buf;
int i = 0;
int pos = 0;
int ret;
#define HDR " |id|fix| UUID |con|msg len|sb|refc|\n"
down_read(&dev->me_clients_rwsem);
list_for_each_entry(me_cl, &dev->me_clients, list) bufsz++;
bufsz *= sizeof(HDR) + 1;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
up_read(&dev->me_clients_rwsem);
return -ENOMEM;
}
pos += scnprintf(buf + pos, bufsz - pos, HDR);
#undef HDR
/* if the driver is not enabled the list won't be consistent */
if (dev->dev_state != MEI_DEV_ENABLED) goto out;
list_for_each_entry(me_cl, &dev->me_clients, list) {
if (mei_me_cl_get(me_cl)) {
- pos += scnprintf(buf + pos, bufsz - pos, "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n", i++, me_cl->client_id, me_cl->props.fixed_address, &me_cl->props.protocol_name, me_cl->props.max_number_of_connections, me_cl->props.max_msg_length, me_cl->props.single_recv_buf, atomic_read(&me_cl->refcnt.refcount));
+ pos += scnprintf(buf + pos, bufsz - pos, "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n", i++, me_cl->client_id, me_cl->props.fixed_address, &me_cl->props.protocol_name, me_cl->props.max_number_of_connections, me_cl->props.max_msg_length, me_cl->props.single_recv_buf, kref_read(&me_cl->refcnt));
mei_me_cl_put(me_cl);
}
}
out:
up_read(&dev->me_clients_rwsem);
ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
kfree(buf);
return ret;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,886 |
--- initial
+++ final
@@ -1,99 +1,99 @@
static struct t10_pr_registration *__core_scsi3_alloc_registration(struct se_device *dev, struct se_node_acl *nacl, struct se_lun *lun, struct se_dev_entry *deve, u64 mapped_lun, unsigned char *isid, u64 sa_res_key, int all_tg_pt, int aptpl) {
struct se_dev_entry *deve_tmp;
struct se_node_acl *nacl_tmp;
struct se_lun_acl *lacl_tmp;
struct se_lun *lun_tmp, *next, *dest_lun;
const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
int ret;
/*
* Create a registration for the I_T Nexus upon which the
* PROUT REGISTER was received.
*/
pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, lun, deve, mapped_lun, isid, sa_res_key, all_tg_pt, aptpl);
if (!pr_reg) return NULL;
/*
* Return pointer to pr_reg for ALL_TG_PT=0
*/
if (!all_tg_pt) return pr_reg;
/*
* Create list of matching SCSI Initiator Port registrations
* for ALL_TG_PT=1
*/
spin_lock(&dev->se_port_lock);
list_for_each_entry_safe(lun_tmp, next, &dev->dev_sep_list, lun_dev_link) {
if (!percpu_ref_tryget_live(&lun_tmp->lun_ref)) continue;
spin_unlock(&dev->se_port_lock);
spin_lock(&lun_tmp->lun_deve_lock);
list_for_each_entry(deve_tmp, &lun_tmp->lun_deve_list, lun_link) {
/*
* This pointer will be NULL for demo mode MappedLUNs
* that have not been make explicit via a ConfigFS
* MappedLUN group for the SCSI Initiator Node ACL.
*/
if (!deve_tmp->se_lun_acl) continue;
lacl_tmp = rcu_dereference_check(deve_tmp->se_lun_acl, lockdep_is_held(&lun_tmp->lun_deve_lock));
nacl_tmp = lacl_tmp->se_lun_nacl;
/*
* Skip the matching struct se_node_acl that is allocated
* above..
*/
if (nacl == nacl_tmp) continue;
/*
* Only perform PR registrations for target ports on
* the same fabric module as the REGISTER w/ ALL_TG_PT=1
* arrived.
*/
if (tfo != nacl_tmp->se_tpg->se_tpg_tfo) continue;
/*
* Look for a matching Initiator Node ACL in ASCII format
*/
if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname)) continue;
kref_get(&deve_tmp->pr_kref);
spin_unlock(&lun_tmp->lun_deve_lock);
/*
* Grab a configfs group dependency that is released
* for the exception path at label out: below, or upon
* completion of adding ALL_TG_PT=1 registrations in
* __core_scsi3_add_registration()
*/
ret = core_scsi3_lunacl_depend_item(deve_tmp);
if (ret < 0) {
pr_err("core_scsi3_lunacl_depend"
"_item() failed\n");
percpu_ref_put(&lun_tmp->lun_ref);
kref_put(&deve_tmp->pr_kref, target_pr_kref_release);
goto out;
}
/*
* Located a matching SCSI Initiator Port on a different
* port, allocate the pr_reg_atp and attach it to the
* pr_reg->pr_reg_atp_list that will be processed once
* the original *pr_reg is processed in
* __core_scsi3_add_registration()
*/
- dest_lun = rcu_dereference_check(deve_tmp->se_lun, atomic_read(&deve_tmp->pr_kref.refcount) != 0);
+ dest_lun = rcu_dereference_check(deve_tmp->se_lun, kref_read(&deve_tmp->pr_kref) != 0);
pr_reg_atp = __core_scsi3_do_alloc_registration(dev, nacl_tmp, dest_lun, deve_tmp, deve_tmp->mapped_lun, NULL, sa_res_key, all_tg_pt, aptpl);
if (!pr_reg_atp) {
percpu_ref_put(&lun_tmp->lun_ref);
core_scsi3_lunacl_undepend_item(deve_tmp);
goto out;
}
list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list, &pr_reg->pr_reg_atp_list);
spin_lock(&lun_tmp->lun_deve_lock);
}
spin_unlock(&lun_tmp->lun_deve_lock);
spin_lock(&dev->se_port_lock);
percpu_ref_put(&lun_tmp->lun_ref);
}
spin_unlock(&dev->se_port_lock);
return pr_reg;
out:
list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
}
kmem_cache_free(t10_pr_reg_cache, pr_reg);
return NULL;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,944 |
--- initial
+++ final
@@ -1,23 +1,23 @@
int genwqe_device_remove(struct genwqe_dev *cd) {
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
if (!genwqe_device_initialized(cd)) return 1;
genwqe_inform_and_stop_processes(cd);
/*
* We currently do wait until all filedescriptors are
* closed. This leads to a problem when we abort the
* application which will decrease this reference from
* 1/unused to 0/illegal and not from 2/used 1/empty.
*/
- rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount);
+ rc = kref_read(&cd->cdev_genwqe.kobj.kref);
if (rc != 1) {
dev_err(&pci_dev->dev, "[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
panic("Fatal err: cannot free resources with pending references!");
}
genqwe_exit_debugfs(cd);
device_destroy(cd->class_genwqe, cd->devnum_genwqe);
cdev_del(&cd->cdev_genwqe);
unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
cd->dev = NULL;
return 0;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,884 |
--- initial
+++ final
@@ -1,26 +1,26 @@
static int dump_mle(struct dlm_master_list_entry *mle, char *buf, int len) {
int out = 0;
char *mle_type;
if (mle->type == DLM_MLE_BLOCK)
mle_type = "BLK";
else if (mle->type == DLM_MLE_MASTER)
mle_type = "MAS";
else
mle_type = "MIG";
out += stringify_lockname(mle->mname, mle->mnamelen, buf + out, len - out);
- out += snprintf(buf + out, len - out, "\t%3s\tmas=%3u\tnew=%3u\tevt=%1d\tuse=%1d\tref=%3d\n", mle_type, mle->master, mle->new_master, !list_empty(&mle->hb_events), !!mle->inuse, atomic_read(&mle->mle_refs.refcount));
+ out += snprintf(buf + out, len - out, "\t%3s\tmas=%3u\tnew=%3u\tevt=%1d\tuse=%1d\tref=%3d\n", mle_type, mle->master, mle->new_master, !list_empty(&mle->hb_events), !!mle->inuse, kref_read(&mle->mle_refs));
out += snprintf(buf + out, len - out, "Maybe=");
out += stringify_nodemap(mle->maybe_map, O2NM_MAX_NODES, buf + out, len - out);
out += snprintf(buf + out, len - out, "\n");
out += snprintf(buf + out, len - out, "Vote=");
out += stringify_nodemap(mle->vote_map, O2NM_MAX_NODES, buf + out, len - out);
out += snprintf(buf + out, len - out, "\n");
out += snprintf(buf + out, len - out, "Response=");
out += stringify_nodemap(mle->response_map, O2NM_MAX_NODES, buf + out, len - out);
out += snprintf(buf + out, len - out, "\n");
out += snprintf(buf + out, len - out, "Node=");
out += stringify_nodemap(mle->node_map, O2NM_MAX_NODES, buf + out, len - out);
out += snprintf(buf + out, len - out, "\n");
out += snprintf(buf + out, len - out, "\n");
return out;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,892 |
--- initial
+++ final
@@ -1,4 +1,4 @@
int amp_ctrl_put(struct amp_ctrl *ctrl) {
- BT_DBG("ctrl %p orig refcnt %d", ctrl, atomic_read(&ctrl->kref.refcount));
+ BT_DBG("ctrl %p orig refcnt %d", ctrl, kref_read(&ctrl->kref));
return kref_put(&ctrl->kref, &_ctrl_destroy);
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,878 |
--- initial
+++ final
@@ -1,45 +1,45 @@
static void __svc_rdma_free(struct work_struct *work) {
struct svcxprt_rdma *rdma = container_of(work, struct svcxprt_rdma, sc_work);
struct svc_xprt *xprt = &rdma->sc_xprt;
dprintk("svcrdma: %s(%p)\n", __func__, rdma);
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) ib_drain_qp(rdma->sc_qp);
/* We should only be called from kref_put */
- if (atomic_read(&xprt->xpt_ref.refcount) != 0) pr_err("svcrdma: sc_xprt still in use? (%d)\n", atomic_read(&xprt->xpt_ref.refcount));
+ if (kref_read(&xprt->xpt_ref) != 0) pr_err("svcrdma: sc_xprt still in use? (%d)\n", kref_read(&xprt->xpt_ref));
/*
* Destroy queued, but not processed read completions. Note
* that this cleanup has to be done before destroying the
* cm_id because the device ptr is needed to unmap the dma in
* svc_rdma_put_context.
*/
while (!list_empty(&rdma->sc_read_complete_q)) {
struct svc_rdma_op_ctxt *ctxt;
ctxt = list_entry(rdma->sc_read_complete_q.next, struct svc_rdma_op_ctxt, dto_q);
list_del_init(&ctxt->dto_q);
svc_rdma_put_context(ctxt, 1);
}
/* Destroy queued, but not processed recv completions */
while (!list_empty(&rdma->sc_rq_dto_q)) {
struct svc_rdma_op_ctxt *ctxt;
ctxt = list_entry(rdma->sc_rq_dto_q.next, struct svc_rdma_op_ctxt, dto_q);
list_del_init(&ctxt->dto_q);
svc_rdma_put_context(ctxt, 1);
}
/* Warn if we leaked a resource or under-referenced */
if (rdma->sc_ctxt_used != 0) pr_err("svcrdma: ctxt still in use? (%d)\n", rdma->sc_ctxt_used);
/* Final put of backchannel client transport */
if (xprt->xpt_bc_xprt) {
xprt_put(xprt->xpt_bc_xprt);
xprt->xpt_bc_xprt = NULL;
}
rdma_dealloc_frmr_q(rdma);
svc_rdma_destroy_ctxts(rdma);
svc_rdma_destroy_maps(rdma);
/* Destroy the QP if present (not a listener) */
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) ib_destroy_qp(rdma->sc_qp);
if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq)) ib_free_cq(rdma->sc_sq_cq);
if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq)) ib_free_cq(rdma->sc_rq_cq);
if (rdma->sc_pd && !IS_ERR(rdma->sc_pd)) ib_dealloc_pd(rdma->sc_pd);
/* Destroy the CM ID */
rdma_destroy_id(rdma->sc_cm_id);
kfree(rdma);
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,939 |
--- initial
+++ final
@@ -1,33 +1,33 @@
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) {
struct lpfc_vport *vport = pmb->vport;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->context1);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->context2;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
pmb->context1 = NULL;
pmb->context2 = NULL;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, atomic_read(&ndlp->kref.refcount), ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp->nlp_usg_map, ndlp);
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
/* We rcvd a rscn after issuing this
* mbox reg login, we may have cycled
* back through the state and be
* back at reg login state so this
* mbox needs to be ignored becase
* there is another reg login in
* process.
*/
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
spin_unlock_irq(shost->host_lock);
}
/* Call state machine */
lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
/* decrement the node reference count held for this callback
* function.
*/
lpfc_nlp_put(ndlp);
return;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,914 |
--- initial
+++ final
@@ -1,49 +1,49 @@
int lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) {
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc, acc_plogi = 1;
uint16_t rpi;
if (ndlp->nlp_flag & NLP_RPI_REGISTERED || ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"3366 RPI x%x needs to be "
"unregistered nlp_flag x%x "
"did x%x\n",
ndlp->nlp_rpi, ndlp->nlp_flag, ndlp->nlp_DID);
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
/* SLI4 ports require the physical rpi value. */
rpi = ndlp->nlp_rpi;
if (phba->sli_rev == LPFC_SLI_REV4) rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
mbox->context1 = ndlp;
mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
} else {
- if (phba->sli_rev == LPFC_SLI_REV4 && (!(vport->load_flag & FC_UNLOADING)) && (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_2) && (atomic_read(&ndlp->kref.refcount) > 0)) {
+ if (phba->sli_rev == LPFC_SLI_REV4 && (!(vport->load_flag & FC_UNLOADING)) && (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_2) && (kref_read(&ndlp->kref) > 0)) {
mbox->context1 = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
/*
* accept PLOGIs after unreg_rpi_cmpl
*/
acc_plogi = 0;
} else
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
acc_plogi = 1;
}
}
lpfc_no_rpi(phba, ndlp);
if (phba->sli_rev != LPFC_SLI_REV4) ndlp->nlp_rpi = 0;
ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
if (acc_plogi) ndlp->nlp_flag &= ~NLP_LOGO_ACC;
return 1;
}
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
return 0;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,921 |
--- initial
+++ final
@@ -1,23 +1,23 @@
void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) {
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->context1);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->context2;
pmb->context1 = NULL;
pmb->context2 = NULL;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp) {
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "0006 rpi%x DID:%x flg:%x %d map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, atomic_read(&ndlp->kref.refcount), ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "0006 rpi%x DID:%x flg:%x %d map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp->nlp_usg_map, ndlp);
if (NLP_CHK_NODE_ACT(ndlp)) {
lpfc_nlp_put(ndlp);
/* This is the end of the default RPI cleanup logic for
* this ndlp. If no other discovery threads are using
* this ndlp, free all resources associated with it.
*/
lpfc_nlp_not_used(ndlp);
} else {
lpfc_drop_node(ndlp->vport, ndlp);
}
}
return;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,909 |
--- initial
+++ final
@@ -1,5 +1,5 @@
static ssize_t usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr, char *buf) {
struct usnic_ib_dev *us_ibdev;
us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
- return scnprintf(buf, PAGE_SIZE, "%u\n", atomic_read(&us_ibdev->vf_cnt.refcount));
+ return scnprintf(buf, PAGE_SIZE, "%u\n", kref_read(&us_ibdev->vf_cnt));
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,955 |
--- initial
+++ final
@@ -1,4 +1,4 @@
void ceph_osdc_get_request(struct ceph_osd_request *req) {
- dout("%s %p (was %d)\n", __func__, req, atomic_read(&req->r_kref.refcount));
+ dout("%s %p (was %d)\n", __func__, req, kref_read(&req->r_kref));
kref_get(&req->r_kref);
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,929 |
--- initial
+++ final
@@ -1,16 +1,16 @@
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) {
struct omap_gem_object *omap_obj = to_omap_bo(obj);
uint64_t off;
off = drm_vma_node_start(&obj->vma_node);
- seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d", omap_obj->flags, obj->name, obj->refcount.refcount.counter, off, &omap_obj->paddr, omap_obj->paddr_cnt, omap_obj->vaddr, omap_obj->roll);
+ seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d", omap_obj->flags, obj->name, kref_read(&obj->refcount), off, &omap_obj->paddr, omap_obj->paddr_cnt, omap_obj->vaddr, omap_obj->roll);
if (omap_obj->flags & OMAP_BO_TILED) {
seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
if (omap_obj->block) {
struct tcm_area *area = &omap_obj->block->area;
seq_printf(m, " (%dx%d, %dx%d)", area->p0.x, area->p0.y, area->p1.x, area->p1.y);
}
} else {
seq_printf(m, " %d", obj->size);
}
seq_printf(m, "\n");
}<sep>@@
expression e;
@@
- e.refcount.counter
+ kref_read(&e)
<|end_of_text|> | 8,928 |
--- initial
+++ final
@@ -1,4 +1,4 @@
void l2cap_chan_put(struct l2cap_chan *c) {
- BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+ BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
kref_put(&c->kref, l2cap_chan_destroy);
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,907 |
--- initial
+++ final
@@ -1,27 +1,27 @@
static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) {
struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
if (cmd->aborted) {
/* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
* can get ahead of this cmd. tcm_qla2xxx_aborted_task
* already kick start the free.
*/
pr_debug("write_pending aborted cmd[%p] refcount %d "
"transport_state %x, t_state %x, se_cmd_flags %x\n",
- cmd, cmd->se_cmd.cmd_kref.refcount.counter, cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
+ cmd, kref_read(&cmd->se_cmd.cmd_kref), cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
return 0;
}
cmd->cmd_flags |= BIT_3;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
cmd->sg_cnt = se_cmd->t_data_nents;
cmd->sg = se_cmd->t_data_sg;
cmd->prot_sg_cnt = se_cmd->t_prot_nents;
cmd->prot_sg = se_cmd->t_prot_sg;
cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;
se_cmd->pi_err = 0;
/*
* qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
* the SGL mappings into PCIe memory for incoming FCP WRITE data.
*/
return qlt_rdy_to_xfer(cmd);
}<sep>@@
expression e;
@@
- e.refcount.counter
+ kref_read(&e)
<|end_of_text|> | 8,950 |
--- initial
+++ final
@@ -1,16 +1,16 @@
void lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint32_t did) {
memset(ndlp, 0, sizeof(struct lpfc_nodelist));
lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0007 rpi:%x DID:%x flg:%x refcnt:%d "
"map:%x %p\n",
- ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, atomic_read(&ndlp->kref.refcount), ndlp->nlp_usg_map, ndlp);
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp->nlp_usg_map, ndlp);
ndlp->active_rrqs_xri_bitmap = mempool_alloc(vport->phba->active_rrq_pool, GFP_KERNEL);
if (ndlp->active_rrqs_xri_bitmap) memset(ndlp->active_rrqs_xri_bitmap, 0, ndlp->phba->cfg_rrq_xri_bitmap_sz);
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, "node init: did:x%x", ndlp->nlp_DID, 0, 0);
return;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,916 |
--- initial
+++ final
@@ -1,5 +1,5 @@
struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) {
- dout("%s %p (was %d)\n", __func__, msg, atomic_read(&msg->kref.refcount));
+ dout("%s %p (was %d)\n", __func__, msg, kref_read(&msg->kref));
kref_get(&msg->kref);
return msg;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,923 |
--- initial
+++ final
@@ -1,4 +1,4 @@
bool comedi_buf_is_mmapped(struct comedi_subdevice *s) {
struct comedi_buf_map *bm = s->async->buf_map;
- return bm && (atomic_read(&bm->refcount.refcount) > 1);
+ return bm && (kref_read(&bm->refcount) > 1);
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,885 |
--- initial
+++ final
@@ -1,43 +1,43 @@
static int ion_debug_heap_show(struct seq_file *s, void *unused) {
struct ion_heap *heap = s->private;
struct ion_device *dev = heap->dev;
struct rb_node *n;
size_t total_size = 0;
size_t total_orphaned_size = 0;
seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
seq_puts(s, "----------------------------------------------------\n");
mutex_lock(&debugfs_mutex);
for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client, node);
size_t size = ion_debug_heap_total(client, heap->id);
if (!size) continue;
if (client->task) {
char task_comm[TASK_COMM_LEN];
get_task_comm(task_comm, client->task);
seq_printf(s, "%16s %16u %16zu\n", task_comm, client->pid, size);
} else {
seq_printf(s, "%16s %16u %16zu\n", client->name, client->pid, size);
}
}
mutex_unlock(&debugfs_mutex);
seq_puts(s, "----------------------------------------------------\n");
seq_puts(s, "orphaned allocations (info is from last known client):\n");
mutex_lock(&dev->buffer_lock);
for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, node);
if (buffer->heap->id != heap->id) continue;
total_size += buffer->size;
if (!buffer->handle_count) {
- seq_printf(s, "%16s %16u %16zu %d %d\n", buffer->task_comm, buffer->pid, buffer->size, buffer->kmap_cnt, atomic_read(&buffer->ref.refcount));
+ seq_printf(s, "%16s %16u %16zu %d %d\n", buffer->task_comm, buffer->pid, buffer->size, buffer->kmap_cnt, kref_read(&buffer->ref));
total_orphaned_size += buffer->size;
}
}
mutex_unlock(&dev->buffer_lock);
seq_puts(s, "----------------------------------------------------\n");
seq_printf(s, "%16s %16zu\n", "total orphaned", total_orphaned_size);
seq_printf(s, "%16s %16zu\n", "total ", total_size);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) seq_printf(s, "%16s %16zu\n", "deferred free", heap->free_list_size);
seq_puts(s, "----------------------------------------------------\n");
if (heap->debug_show) heap->debug_show(heap, s, unused);
return 0;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,904 |
--- initial
+++ final
@@ -1,62 +1,62 @@
void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) {
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->context1);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->context2;
struct lpfc_vport *vport = pmb->vport;
pmb->context1 = NULL;
pmb->context2 = NULL;
if (mb->mbxStatus) {
out:
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "0260 Register NameServer error: 0x%x\n", mb->mbxStatus);
/* decrement the node reference count held for this
* callback function.
*/
lpfc_nlp_put(ndlp);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
/* If no other thread is using the ndlp, free it */
lpfc_nlp_not_used(ndlp);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/*
* RegLogin failed, use loop map to make discovery
* list
*/
lpfc_disc_list_loopmap(vport);
/* Start discovery */
lpfc_disc_start(vport);
return;
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
}
if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, "0003 rpi:%x DID:%x flg:%x %d map%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, atomic_read(&ndlp->kref.refcount), ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, "0003 rpi:%x DID:%x flg:%x %d map%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp->nlp_usg_map, ndlp);
if (vport->port_state < LPFC_VPORT_READY) {
/* Link up discovery requires Fabric registration. */
lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0);
/* Issue SCR just before NameServer GID_FT Query */
lpfc_issue_els_scr(vport, SCR_DID, 0);
}
vport->fc_ns_retry = 0;
/* Good status, issue CT Request to NameServer */
if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
/* Cannot issue NameServer Query, so finish up discovery */
goto out;
}
/* decrement the node reference count held for this
* callback function.
*/
lpfc_nlp_put(ndlp);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
return;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,913 |
--- initial
+++ final
@@ -1,20 +1,20 @@
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) {
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
struct dma_fence *fence;
unsigned long off = drm_vma_node_start(&obj->vma_node);
- seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I', obj->name, obj->refcount.refcount.counter, off, etnaviv_obj->vaddr, obj->size);
+ seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I', obj->name, kref_read(&obj->refcount), off, etnaviv_obj->vaddr, obj->size);
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
if (fobj) {
unsigned int i, shared_count = fobj->shared_count;
for (i = 0; i < shared_count; i++) {
fence = rcu_dereference(fobj->shared[i]);
etnaviv_gem_describe_fence(fence, "Shared", m);
}
}
fence = rcu_dereference(robj->fence_excl);
if (fence) etnaviv_gem_describe_fence(fence, "Exclusive", m);
rcu_read_unlock();
}<sep>@@
expression e;
@@
- e.refcount.counter
+ kref_read(&e)
<|end_of_text|> | 8,902 |
--- initial
+++ final
@@ -1,14 +1,14 @@
static int c_show(struct seq_file *m, void *p) {
struct cache_head *cp = p;
struct cache_detail *cd = m->private;
if (p == SEQ_START_TOKEN) return cd->cache_show(m, cd, NULL);
- ifdebug(CACHE) seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n", convert_to_wallclock(cp->expiry_time), atomic_read(&cp->ref.refcount), cp->flags);
+ ifdebug(CACHE) seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n", convert_to_wallclock(cp->expiry_time), kref_read(&cp->ref), cp->flags);
cache_get(cp);
if (cache_check(cd, cp, NULL)) /* cache_check does a cache_put on failure */
seq_printf(m, "# ");
else {
if (cache_is_expired(cd, cp)) seq_printf(m, "# ");
cache_put(cp, cd);
}
return cd->cache_show(m, cd, cp);
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,883 |
--- initial
+++ final
@@ -1,15 +1,15 @@
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool) {
struct svc_xprt *xprt = NULL;
if (list_empty(&pool->sp_sockets)) goto out;
spin_lock_bh(&pool->sp_lock);
if (likely(!list_empty(&pool->sp_sockets))) {
xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready);
list_del_init(&xprt->xpt_ready);
svc_xprt_get(xprt);
- dprintk("svc: transport %p dequeued, inuse=%d\n", xprt, atomic_read(&xprt->xpt_ref.refcount));
+ dprintk("svc: transport %p dequeued, inuse=%d\n", xprt, kref_read(&xprt->xpt_ref));
}
spin_unlock_bh(&pool->sp_lock);
out:
trace_svc_xprt_dequeue(xprt);
return xprt;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,942 |
--- initial
+++ final
@@ -1,95 +1,95 @@
static void bnx2fc_cmd_timeout(struct work_struct *work) {
struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd, timeout_work.work);
u8 cmd_type = io_req->cmd_type;
struct bnx2fc_rport *tgt = io_req->tgt;
int rc;
BNX2FC_IO_DBG(io_req,
"cmd_timeout, cmd_type = %d,"
"req_flags = %lx\n",
cmd_type, io_req->req_flags);
spin_lock_bh(&tgt->tgt_lock);
if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
/*
* ideally we should hold the io_req until RRQ complets,
* and release io_req from timeout hold.
*/
spin_unlock_bh(&tgt->tgt_lock);
bnx2fc_send_rrq(io_req);
return;
}
if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
goto done;
}
switch (cmd_type) {
case BNX2FC_SCSI_CMD:
if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags)) {
/* Handle eh_abort timeout */
BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
complete(&io_req->tm_done);
} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
/* Handle internally generated ABTS timeout */
- BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n", io_req->refcount.refcount.counter);
+ BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n", kref_read(&io_req->refcount));
if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags))) {
/*
* Cleanup and return original command to
* mid-layer.
*/
bnx2fc_initiate_cleanup(io_req);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
return;
}
} else {
/* Hanlde IO timeout */
BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
BNX2FC_IO_DBG(io_req, "IO completed before "
" timer expiry\n");
goto done;
}
if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
rc = bnx2fc_initiate_abts(io_req);
if (rc == SUCCESS) goto done;
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
return;
} else {
BNX2FC_IO_DBG(io_req, "IO already in "
"ABTS processing\n");
}
}
break;
case BNX2FC_ELS:
if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");
if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags)) {
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
return;
}
} else {
/*
* Handle ELS timeout.
* tgt_lock is used to sync compl path and timeout
* path. If els compl path is processing this IO, we
* have nothing to do here, just release the timer hold
*/
BNX2FC_IO_DBG(io_req, "ELS timed out\n");
if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE, &io_req->req_flags)) goto done;
/* Indicate the cb_func that this ELS is timed out */
set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);
if ((io_req->cb_func) && (io_req->cb_arg)) {
io_req->cb_func(io_req->cb_arg);
io_req->cb_arg = NULL;
}
}
break;
default: printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n", cmd_type); break;
}
done:
/* release the cmd that was held when timer was set */
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
}<sep>@@
expression e;
@@
- e.refcount.counter
+ kref_read(&e)
<|end_of_text|> | 8,879 |
--- initial
+++ final
@@ -1,24 +1,24 @@
static void lpfc_nlp_release(struct kref *kref) {
struct lpfc_hba *phba;
unsigned long flags;
struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, kref);
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, "node release: did:x%x flg:x%x type:x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"0279 lpfc_nlp_release: ndlp:x%p did %x "
"usgmap:x%x refcnt:%d rpi:%x\n",
- (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map, atomic_read(&ndlp->kref.refcount), ndlp->nlp_rpi);
+ (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map, kref_read(&ndlp->kref), ndlp->nlp_rpi);
/* remove ndlp from action. */
lpfc_nlp_remove(ndlp->vport, ndlp);
/* clear the ndlp active flag for all release cases */
phba = ndlp->phba;
spin_lock_irqsave(&phba->ndlp_lock, flags);
NLP_CLR_NODE_ACT(ndlp);
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
if (phba->sli_rev == LPFC_SLI_REV4) lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
/* free ndlp memory for final ndlp release */
if (NLP_CHK_FREE_REQ(ndlp)) {
kfree(ndlp->lat_data);
if (phba->sli_rev == LPFC_SLI_REV4) mempool_free(ndlp->active_rrqs_xri_bitmap, ndlp->phba->active_rrq_pool);
mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
}
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,919 |
--- initial
+++ final
@@ -1,177 +1,177 @@
int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp, enum iwch_qp_attr_mask mask, struct iwch_qp_attributes *attrs, int internal) {
int ret = 0;
struct iwch_qp_attributes newattr = qhp->attr;
unsigned long flag;
int disconnect = 0;
int terminate = 0;
int abort = 0;
int free = 0;
struct iwch_ep *ep = NULL;
PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__, qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state, (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
spin_lock_irqsave(&qhp->lock, flag);
/* Process attr changes if in IDLE */
if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
ret = -EIO;
goto out;
}
if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ) newattr.enable_rdma_read = attrs->enable_rdma_read;
if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE) newattr.enable_rdma_write = attrs->enable_rdma_write;
if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND) newattr.enable_bind = attrs->enable_bind;
if (mask & IWCH_QP_ATTR_MAX_ORD) {
if (attrs->max_ord > rhp->attr.max_rdma_read_qp_depth) {
ret = -EINVAL;
goto out;
}
newattr.max_ord = attrs->max_ord;
}
if (mask & IWCH_QP_ATTR_MAX_IRD) {
if (attrs->max_ird > rhp->attr.max_rdma_reads_per_qp) {
ret = -EINVAL;
goto out;
}
newattr.max_ird = attrs->max_ird;
}
qhp->attr = newattr;
}
if (!(mask & IWCH_QP_ATTR_NEXT_STATE)) goto out;
if (qhp->attr.state == attrs->next_state) goto out;
switch (qhp->attr.state) {
case IWCH_QP_STATE_IDLE:
switch (attrs->next_state) {
case IWCH_QP_STATE_RTS:
if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
ret = -EINVAL;
goto out;
}
if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
ret = -EINVAL;
goto out;
}
qhp->attr.mpa_attr = attrs->mpa_attr;
qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
qhp->ep = qhp->attr.llp_stream_handle;
qhp->attr.state = IWCH_QP_STATE_RTS;
/*
* Ref the endpoint here and deref when we
* disassociate the endpoint from the QP. This
* happens in CLOSING->IDLE transition or *->ERROR
* transition.
*/
get_ep(&qhp->ep->com);
spin_unlock_irqrestore(&qhp->lock, flag);
ret = rdma_init(rhp, qhp, mask, attrs);
spin_lock_irqsave(&qhp->lock, flag);
if (ret) goto err;
break;
case IWCH_QP_STATE_ERROR:
qhp->attr.state = IWCH_QP_STATE_ERROR;
flush_qp(qhp);
break;
default: ret = -EINVAL; goto out;
}
break;
case IWCH_QP_STATE_RTS:
switch (attrs->next_state) {
case IWCH_QP_STATE_CLOSING:
- BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
+ BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
qhp->attr.state = IWCH_QP_STATE_CLOSING;
if (!internal) {
abort = 0;
disconnect = 1;
ep = qhp->ep;
get_ep(&ep->com);
}
break;
case IWCH_QP_STATE_TERMINATE:
qhp->attr.state = IWCH_QP_STATE_TERMINATE;
if (qhp->ibqp.uobject) cxio_set_wq_in_error(&qhp->wq);
if (!internal) terminate = 1;
break;
case IWCH_QP_STATE_ERROR:
qhp->attr.state = IWCH_QP_STATE_ERROR;
if (!internal) {
abort = 1;
disconnect = 1;
ep = qhp->ep;
get_ep(&ep->com);
}
goto err;
break;
default: ret = -EINVAL; goto out;
}
break;
case IWCH_QP_STATE_CLOSING:
if (!internal) {
ret = -EINVAL;
goto out;
}
switch (attrs->next_state) {
case IWCH_QP_STATE_IDLE:
flush_qp(qhp);
qhp->attr.state = IWCH_QP_STATE_IDLE;
qhp->attr.llp_stream_handle = NULL;
put_ep(&qhp->ep->com);
qhp->ep = NULL;
wake_up(&qhp->wait);
break;
case IWCH_QP_STATE_ERROR: goto err;
default: ret = -EINVAL; goto err;
}
break;
case IWCH_QP_STATE_ERROR:
if (attrs->next_state != IWCH_QP_STATE_IDLE) {
ret = -EINVAL;
goto out;
}
if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) || !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
ret = -EINVAL;
goto out;
}
qhp->attr.state = IWCH_QP_STATE_IDLE;
break;
case IWCH_QP_STATE_TERMINATE:
if (!internal) {
ret = -EINVAL;
goto out;
}
goto err;
break;
default:
printk(KERN_ERR "%s in a bad state %d\n", __func__, qhp->attr.state);
ret = -EINVAL;
goto err;
break;
}
goto out;
err:
PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep, qhp->wq.qpid);
/* disassociate the LLP connection */
qhp->attr.llp_stream_handle = NULL;
ep = qhp->ep;
qhp->ep = NULL;
qhp->attr.state = IWCH_QP_STATE_ERROR;
free = 1;
wake_up(&qhp->wait);
BUG_ON(!ep);
flush_qp(qhp);
out:
spin_unlock_irqrestore(&qhp->lock, flag);
if (terminate) iwch_post_terminate(qhp, NULL);
/*
* If disconnect is 1, then we need to initiate a disconnect
* on the EP. This can be a normal close (RTS->CLOSING) or
* an abnormal close (RTS/CLOSING->ERROR).
*/
if (disconnect) {
iwch_ep_disconnect(ep, abort, GFP_KERNEL);
put_ep(&ep->com);
}
/*
* If free is 1, then we've disassociated the EP from the QP
* and we need to dereference the EP.
*/
if (free) put_ep(&ep->com);
PDBG("%s exit state %d\n", __func__, qhp->attr.state);
return ret;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,905 |
--- initial
+++ final
@@ -1,48 +1,48 @@
static int lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) {
int len = 0;
int cnt;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
unsigned char *statep, *name;
cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (!cnt) {
len += snprintf(buf + len, size - len, "Missing Nodelist Entries\n");
break;
}
cnt--;
switch (ndlp->nlp_state) {
case NLP_STE_UNUSED_NODE: statep = "UNUSED"; break;
case NLP_STE_PLOGI_ISSUE: statep = "PLOGI "; break;
case NLP_STE_ADISC_ISSUE: statep = "ADISC "; break;
case NLP_STE_REG_LOGIN_ISSUE: statep = "REGLOG"; break;
case NLP_STE_PRLI_ISSUE: statep = "PRLI "; break;
case NLP_STE_LOGO_ISSUE: statep = "LOGO "; break;
case NLP_STE_UNMAPPED_NODE: statep = "UNMAP "; break;
case NLP_STE_MAPPED_NODE: statep = "MAPPED"; break;
case NLP_STE_NPR_NODE: statep = "NPR "; break;
default: statep = "UNKNOWN";
}
len += snprintf(buf + len, size - len, "%s DID:x%06x ", statep, ndlp->nlp_DID);
name = (unsigned char *)&ndlp->nlp_portname;
len += snprintf(buf + len, size - len, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", *name, *(name + 1), *(name + 2), *(name + 3), *(name + 4), *(name + 5), *(name + 6), *(name + 7));
name = (unsigned char *)&ndlp->nlp_nodename;
len += snprintf(buf + len, size - len, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", *name, *(name + 1), *(name + 2), *(name + 3), *(name + 4), *(name + 5), *(name + 6), *(name + 7));
if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
len += snprintf(buf + len, size - len, "RPI:%03d ", ndlp->nlp_rpi);
else
len += snprintf(buf + len, size - len, "RPI:none ");
len += snprintf(buf + len, size - len, "flag:x%08x ", ndlp->nlp_flag);
if (!ndlp->nlp_type) len += snprintf(buf + len, size - len, "UNKNOWN_TYPE ");
if (ndlp->nlp_type & NLP_FC_NODE) len += snprintf(buf + len, size - len, "FC_NODE ");
if (ndlp->nlp_type & NLP_FABRIC) len += snprintf(buf + len, size - len, "FABRIC ");
if (ndlp->nlp_type & NLP_FCP_TARGET) len += snprintf(buf + len, size - len, "FCP_TGT sid:%d ", ndlp->nlp_sid);
if (ndlp->nlp_type & NLP_FCP_INITIATOR) len += snprintf(buf + len, size - len, "FCP_INITIATOR ");
len += snprintf(buf + len, size - len, "usgmap:%x ", ndlp->nlp_usg_map);
- len += snprintf(buf + len, size - len, "refcnt:%x", atomic_read(&ndlp->kref.refcount));
+ len += snprintf(buf + len, size - len, "refcnt:%x", kref_read(&ndlp->kref));
len += snprintf(buf + len, size - len, "\n");
}
spin_unlock_irq(shost->host_lock);
return len;
}<sep>@@
expression e;
@@
- atomic_read(&e.refcount)
+ kref_read(&e)
<|end_of_text|> | 8,908 |