@@ -194,7 +194,8 @@ static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
sg = sg_next(sg);
BUG_ON(!sg);
frag = &skb_shinfo(skb)->frags[i];
- sg_set_page(sg, frag->page.p, frag->size, frag->page_offset);
+ sg_set_page(sg, skb_frag_page(frag), frag->size,
+ frag->page_offset);
}
}
@@ -2463,7 +2463,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
g->sg[(i >> 2)].ptr[(i & 3)] =
dma_map_page(&oct->pci_dev->dev,
- frag->page.p,
+ skb_frag_page(frag),
frag->page_offset,
frag->size,
DMA_TO_DEVICE);
@@ -1536,7 +1536,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
g->sg[(i >> 2)].ptr[(i & 3)] =
dma_map_page(&oct->pci_dev->dev,
- frag->page.p,
+ skb_frag_page(frag),
frag->page_offset,
frag->size,
DMA_TO_DEVICE);
@@ -387,7 +387,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
ebdp->cbd_esc = cpu_to_fec32(estatus);
}
- bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+ bufaddr = skb_frag_address(this_frag);
index = fec_enet_get_bd_index(bdp, &txq->bd);
if (((unsigned long) bufaddr) & fep->tx_align ||
@@ -2349,7 +2349,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = page_address(frag->page.p) + frag->page_offset;
+ void *addr = skb_frag_address(frag);
tx_desc = mvneta_txq_next_desc_get(txq);
tx_desc->data_size = frag->size;
@@ -2822,7 +2822,7 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = page_address(frag->page.p) + frag->page_offset;
+ void *addr = skb_frag_address(frag);
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
@@ -1400,7 +1400,8 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
tpbuf->length = frag->size;
tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
- frag->page.p, frag->page_offset,
+ skb_frag_page(frag),
+ frag->page_offset,
tpbuf->length, DMA_TO_DEVICE);
ret = dma_mapping_error(adpt->netdev->dev.parent,
tpbuf->dma_addr);
@@ -1338,7 +1338,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
struct skb_frag_struct *f = &skb_shinfo(skb)->frags[i];
total_len += skb_frag_size(f);
- sg_set_page(&urb->sg[i + s], f->page.p, f->size,
+ sg_set_page(&urb->sg[i + s], skb_frag_page(f), f->size,
f->page_offset);
}
urb->transfer_buffer_length = total_len;
@@ -1055,7 +1055,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
int j;
skb->truesize += skb->data_len;
for (j = 0; j < i; j++)
- put_page(frags[j].page.p);
+ put_page(skb_frag_page(&frags[j]));
return -ENOMEM;
}
@@ -1067,7 +1067,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
BUG();
offset += len;
- frags[i].page.p = page;
+ __skb_frag_set_page(&frags[i], page);
frags[i].page_offset = 0;
skb_frag_size_set(&frags[i], len);
}
@@ -281,8 +281,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
hw_buffer.s.addr = XKPHYS_TO_PHYS(
- (u64)(page_address(fs->page.p) +
- fs->page_offset));
+ (u64)skb_frag_address(fs));
hw_buffer.s.size = fs->size;
CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
}
@@ -902,9 +902,9 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];
sg_init_table(&ccmd->sg, 1);
- sg_set_page(&ccmd->sg, dfrag->page.p, skb_frag_size(dfrag),
- dfrag->page_offset);
- get_page(dfrag->page.p);
+ sg_set_page(&ccmd->sg, skb_frag_page(dfrag),
+ skb_frag_size(dfrag), dfrag->page_offset);
+ get_page(skb_frag_page(dfrag));
cmd->se_cmd.t_data_sg = &ccmd->sg;
cmd->se_cmd.t_data_nents = 1;
@@ -2337,7 +2337,7 @@ int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
slen = min_t(size_t, len, frag->size - offset);
while (slen) {
- ret = kernel_sendpage_locked(sk, frag->page.p,
+ ret = kernel_sendpage_locked(sk, skb_frag_page(frag),
frag->page_offset + offset,
slen, MSG_DONTWAIT);
if (ret <= 0)
@@ -3459,7 +3459,7 @@ static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
struct page *page;
page = virt_to_head_page(frag_skb->head);
- head_frag.page.p = page;
+ __skb_frag_set_page(&head_frag, page);
head_frag.page_offset = frag_skb->data -
(unsigned char *)page_address(page);
head_frag.size = skb_headlen(frag_skb);
@@ -3855,7 +3855,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
- frag->page.p = page;
+ __skb_frag_set_page(frag, page);
frag->page_offset = first_offset;
skb_frag_size_set(frag, first_size);
@@ -56,7 +56,7 @@ void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
/* Move to next segment */
tso->size = frag->size;
- tso->data = page_address(frag->page.p) + frag->page_offset;
+ tso->data = skb_frag_address(frag);
tso->next_frag_idx++;
}
}
@@ -80,7 +80,7 @@ void tso_start(struct sk_buff *skb, struct tso_t *tso)
/* Move to next segment */
tso->size = frag->size;
- tso->data = page_address(frag->page.p) + frag->page_offset;
+ tso->data = skb_frag_address(frag);
tso->next_frag_idx++;
}
}
@@ -644,7 +644,7 @@ static int kcm_write_msgs(struct kcm_sock *kcm)
}
ret = kernel_sendpage(psock->sk->sk_socket,
- frag->page.p,
+ skb_frag_page(frag),
frag->page_offset + frag_offset,
frag->size - frag_offset,
MSG_DONTWAIT);
@@ -233,12 +233,12 @@ static void tls_append_frag(struct tls_record_info *record,
skb_frag_t *frag;
frag = &record->frags[record->num_frags - 1];
- if (frag->page.p == pfrag->page &&
+ if (skb_frag_page(frag) == pfrag->page &&
frag->page_offset + frag->size == pfrag->offset) {
frag->size += size;
} else {
++frag;
- frag->page.p = pfrag->page;
+ __skb_frag_set_page(frag, pfrag->page);
frag->page_offset = pfrag->offset;
frag->size = size;
++record->num_frags;