/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
        skb_set_owner_w(skb, sk);
        WARN_ON(sk->sk_send_head);
        sk->sk_send_head = skb;
}

/*
 * All SKBs seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb != NULL)) {
                const struct inet_sock *inet = inet_sk(sk);
                const struct inet_connection_sock *icsk = inet_csk(sk);
                struct dccp_sock *dp = dccp_sk(sk);
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                struct dccp_hdr *dh;
                /* XXX For now we're using only 48 bits sequence numbers */
                const u32 dccp_header_size = sizeof(*dh) +
                                             sizeof(struct dccp_hdr_ext) +
                                          dccp_packet_hdr_len(dcb->dccpd_type);
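                /*
                 * Illustrative sizes (taken from the header structs, not
                 * computed here): struct dccp_hdr is 12 bytes, the 48-bit
                 * sequence number extension adds 4, and e.g. a DATAACK
                 * carries an 8-byte Acknowledgement Number subheader, for
                 * 24 bytes in total before options.
                 */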
                int err, set_ack = 1;
                u64 ackno = dp->dccps_gsr;

                dccp_inc_seqno(&dp->dccps_gss);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_DATA:
                        set_ack = 0;
                        /* fall through */
                case DCCP_PKT_DATAACK:
                        break;

                case DCCP_PKT_REQUEST:
                        set_ack = 0;
                        /* fall through */

                case DCCP_PKT_SYNC:
                case DCCP_PKT_SYNCACK:
                        ackno = dcb->dccpd_seq;
                        /* fall through */
                default:
                        /*
                         * Only data packets should come through with skb->sk
                         * set.
                         */
                        WARN_ON(skb->sk);
                        skb_set_owner_w(skb, sk);
                        break;
                }

                dcb->dccpd_seq = dp->dccps_gss;

                if (dccp_insert_options(sk, skb)) {
                        kfree_skb(skb);
                        return -EPROTO;
                }

                /* Build DCCP header and checksum it. */
                dh = dccp_zeroed_hdr(skb, dccp_header_size);
                dh->dccph_type  = dcb->dccpd_type;
                dh->dccph_sport = inet->sport;
                dh->dccph_dport = inet->dport;
                dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
                dh->dccph_ccval = dcb->dccpd_ccval;
                dh->dccph_cscov = dp->dccps_pcslen;
                /* XXX For now we're using only 48 bits sequence numbers */
                dh->dccph_x     = 1;

                dp->dccps_awh = dp->dccps_gss;
                dccp_hdr_set_seq(dh, dp->dccps_gss);
                if (set_ack)
                        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_REQUEST:
                        dccp_hdr_request(skb)->dccph_req_service =
                                                        dp->dccps_service;
                        break;
                case DCCP_PKT_RESET:
                        dccp_hdr_reset(skb)->dccph_reset_code =
                                                        dcb->dccpd_reset_code;
                        break;
                }

                icsk->icsk_af_ops->send_check(sk, 0, skb);

                if (set_ack)
                        dccp_event_ack_sent(sk);

                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

                memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
                err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0);
                if (err <= 0)
                        return err;

                /* NET_XMIT_CN is special. It does not guarantee that this
                 * packet is lost. It tells us that the device is about to
                 * start dropping packets, or already drops some packets of
                 * the same priority, and invites us to send less
                 * aggressively. Callers therefore treat it as success.
                 */
                return err == NET_XMIT_CN ? 0 : err;
        }
        return -ENOBUFS;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        int mss_now = (pmtu - icsk->icsk_af_ops->net_header_len -
                       sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext));

        /* Now subtract optional transport overhead */
        mss_now -= icsk->icsk_ext_hdr_len;

        /*
         * FIXME: this should come from the CCID infrastructure, where, say,
         * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc. For now,
         * let's put in a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO
         * + ELAPSED TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE
         * + padding to make it a multiple of 4.
         */
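        /*
         * Worked example (the per-option sizes are the rough estimate above,
         * not values negotiated with the peer): 5 + 6 + 10 + 6 + 6 + 6 = 39
         * option bytes; adding 3 before the truncating division by 4 rounds
         * this up to the next multiple of 4, so 40 bytes are reserved.
         */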
        mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;

        /* And store the cached results */
        icsk->icsk_pmtu_cookie = pmtu;
        dp->dccps_mss_cache = mss_now;

        return mss_now;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

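/*
 * A minimal usage sketch: callers pass in the current path MTU, e.g.
 * dccp_connect_init() below does dccp_sync_mss(sk, dst_mtu(dst)); the
 * result is then cached in dccps_mss_cache for the send path.
 */
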
void dccp_write_space(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);

        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
        /* Should agree with poll, otherwise some programs break */
        if (sock_writeable(sk))
                sk_wake_async(sk, 2, POLL_OUT);

        read_unlock(&sk->sk_callback_lock);
}

/**
 * dccp_wait_for_ccid - Wait for the CCID to tell us we can send a packet
 * @sk:    socket to wait for
 * @skb:   packet we are trying to send
 * @timeo: for how long
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
                              long *timeo)
{
        struct dccp_sock *dp = dccp_sk(sk);
        DEFINE_WAIT(wait);
        long delay;
        int rc;

        while (1) {
                prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

                if (signal_pending(current))
                        goto do_interrupted;

                rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
                                            skb->len);
                if (rc <= 0)    /* CCID allows the send now (or hard error) */
                        break;
                delay = msecs_to_jiffies(rc);
                if (delay > *timeo || delay < 0)
                        goto do_nonblock;

                /* Sleep without the socket lock so the receive path runs. */
                sk->sk_write_pending++;
                release_sock(sk);
                *timeo -= schedule_timeout(delay);
                lock_sock(sk);
                sk->sk_write_pending--;
        }
out:
        finish_wait(sk->sk_sleep, &wait);
        return rc;

do_nonblock:
        rc = -EAGAIN;
        goto out;
do_interrupted:
        rc = sock_intr_errno(*timeo);
        goto out;
}

static void dccp_write_xmit_timer(unsigned long data)
{
        struct sock *sk = (struct sock *)data;
        struct dccp_sock *dp = dccp_sk(sk);

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                /* Try again later, when the lock holder is done. */
                sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies + 1);
        else
                dccp_write_xmit(sk, 0);
        bh_unlock_sock(sk);
        sock_put(sk);
}

void dccp_write_xmit(struct sock *sk, int block)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        long timeo = DCCP_XMIT_TIMEO;   /* If a packet is taking longer than
                                           this we have other issues */

        while ((skb = skb_peek(&sk->sk_write_queue))) {
                int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
                                                 skb->len);

                if (err > 0) {
                        if (!block) {
                                sk_reset_timer(sk, &dp->dccps_xmit_timer,
                                               msecs_to_jiffies(err) + jiffies);
                                break;
                        } else {
                                err = dccp_wait_for_ccid(sk, skb, &timeo);
                                timeo = DCCP_XMIT_TIMEO;
                        }
                        if (err)
                                printk(KERN_CRIT "%s:err at dccp_wait_for_ccid"
                                                 " %d\n", __FUNCTION__, err);
                }

                skb_dequeue(&sk->sk_write_queue);
                if (err == 0) {
                        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                        const int len = skb->len;

                        if (sk->sk_state == DCCP_PARTOPEN) {
                                /* See 8.1.5. Handshake Completion */
                                inet_csk_schedule_ack(sk);
                                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                inet_csk(sk)->icsk_rto,
                                                DCCP_RTO_MAX);
                                dcb->dccpd_type = DCCP_PKT_DATAACK;
                        } else if (dccp_ack_pending(sk))
                                dcb->dccpd_type = DCCP_PKT_DATAACK;
                        else
                                dcb->dccpd_type = DCCP_PKT_DATA;

                        err = dccp_transmit_skb(sk, skb);
                        ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
                        if (err)
                                printk(KERN_CRIT "%s:err from "
                                                 "ccid_hc_tx_packet_sent %d\n",
                                                 __FUNCTION__, err);
                } else
                        kfree_skb(skb);
        }
}

int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
                return -EHOSTUNREACH; /* Routing failure or similar. */

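        /*
         * If the skb is still cloned (its data shared with another user),
         * it must not be modified in place, so retransmit a private copy;
         * otherwise a cheap clone of the original is enough.
         */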
        return dccp_transmit_skb(sk, (skb_cloned(skb) ?
                                      pskb_copy(skb, GFP_ATOMIC) :
                                      skb_clone(skb, GFP_ATOMIC)));
}

struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
                                   struct request_sock *req)
{
        struct dccp_hdr *dh;
        struct dccp_request_sock *dreq;
        const u32 dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_response);
        struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);

        skb->dst = dst_clone(dst);

        dreq = dccp_rsk(req);
        if (inet_rsk(req)->acked)       /* increase ISS upon retransmission */
                dccp_inc_seqno(&dreq->dreq_iss);
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
        DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;

        if (dccp_insert_options(sk, skb)) {
                kfree_skb(skb);
                return NULL;
        }

        /* Build the header and checksum it. */
        dh = dccp_zeroed_hdr(skb, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_rsk(req)->rmt_port;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESPONSE;
        dh->dccph_x     = 1;

        dccp_hdr_set_seq(dh, dreq->dreq_iss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
        dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

        dccp_csum_outgoing(skb);

        /* We use `acked' to remember that a Response was already sent. */
        inet_rsk(req)->acked = 1;
        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
                                       const enum dccp_reset_codes code)
{
        struct dccp_hdr *dh;
        struct dccp_sock *dp = dccp_sk(sk);
        const u32 dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_reset);
        struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);

        skb->dst = dst_clone(dst);

        dccp_inc_seqno(&dp->dccps_gss);

        DCCP_SKB_CB(skb)->dccpd_reset_code = code;
        DCCP_SKB_CB(skb)->dccpd_type       = DCCP_PKT_RESET;
        DCCP_SKB_CB(skb)->dccpd_seq        = dp->dccps_gss;

        if (dccp_insert_options(sk, skb)) {
                kfree_skb(skb);
                return NULL;
        }

        dh = dccp_zeroed_hdr(skb, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_sk(sk)->dport;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESET;
        dh->dccph_x     = 1;

        dccp_hdr_set_seq(dh, dp->dccps_gss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

        dccp_hdr_reset(skb)->dccph_reset_code = code;
        inet_csk(sk)->icsk_af_ops->send_check(sk, 0, skb);

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}

int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
        /*
         * FIXME: what if rebuild_header fails?
         * Should we be doing a rebuild_header here?
         */
        int err = inet_sk_rebuild_header(sk);

        if (err == 0) {
                struct sk_buff *skb = dccp_make_reset(sk, sk->sk_dst_cache,
                                                      code);
                if (skb != NULL) {
                        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
                        err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0);
                        /* As in dccp_transmit_skb(), treat CN as success. */
                        if (err == NET_XMIT_CN)
                                err = 0;
                }
        }

        return err;
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk->sk_err = 0;
        sock_reset_flag(sk, SOCK_DONE);

        dccp_sync_mss(sk, dst_mtu(dst));

        dccp_update_gss(sk, dp->dccps_iss);
        /*
         * SWL and AWL are initially adjusted so that they are not less than
         * the initial Sequence Numbers received and sent, respectively:
         *      SWL := max(GSR + 1 - floor(W/4), ISR),
         *      AWL := max(GSS - W' + 1, ISS).
         * These adjustments MUST be applied only at the beginning of the
         * connection.
         */
        dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));
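        /*
         * Illustrative numbers (assumed here, not read from the socket):
         * with ISS = 100 and a local window W' = 100, GSS still equals ISS
         * at this point, so AWL = max(100 - 100 + 1, 100) = 100; nothing
         * sent before the initial Request can later be validly acked.
         */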
        icsk->icsk_retransmits = 0;
        init_timer(&dp->dccps_xmit_timer);
        dp->dccps_xmit_timer.data = (unsigned long)sk;
        dp->dccps_xmit_timer.function = dccp_write_xmit_timer;
}

int dccp_connect(struct sock *sk)
{
        struct sk_buff *skb;
        struct inet_connection_sock *icsk = inet_csk(sk);

        dccp_connect_init(sk);

        skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
        if (unlikely(skb == NULL))
                return -ENOBUFS;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

        dccp_skb_entail(sk, skb);
        dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
        DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

        /* Timer for repeating the REQUEST until an answer. */
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  icsk->icsk_rto, DCCP_RTO_MAX);
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
        /* If we have been reset, we may not send again. */
        if (sk->sk_state != DCCP_CLOSED) {
                struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
                                                GFP_ATOMIC);

                if (skb == NULL) {
                        /* Out of memory: fall back to the delack timer. */
                        inet_csk_schedule_ack(sk);
                        inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX,
                                                  DCCP_RTO_MAX);
                        return;
                }

                /* Reserve space for headers */
                skb_reserve(skb, sk->sk_prot->max_header);
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
                dccp_transmit_skb(sk, skb);
        }
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

void dccp_send_delayed_ack(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        /*
         * FIXME: tune this timer. Elapsed time fixes the skew, so no problem
         * with using 2s, and active senders also piggyback the ACK into a
         * DATAACK packet, so this is really for quiescent senders.
         */
        unsigned long timeout = jiffies + 2 * HZ;

        /* Use the new timeout only if there wasn't an older one earlier. */
        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                /* If the delack timer was blocked or is about to expire,
                 * send the ACK now.
                 *
                 * FIXME: check the "about to expire" part
                 */
                if (icsk->icsk_ack.blocked) {
                        dccp_send_ack(sk);
                        return;
                }

                if (!time_before(timeout, icsk->icsk_ack.timeout))
                        timeout = icsk->icsk_ack.timeout;
        }
        icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
        icsk->icsk_ack.timeout = timeout;
        sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

void dccp_send_sync(struct sock *sk, const u64 seq,
                    const enum dccp_pkt_type pkt_type)
{
        /*
         * We are not putting this on the write queue, so
         * dccp_transmit_skb() will set the ownership to this
         * sock.
         */
        struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

        if (skb == NULL)
                /* FIXME: how to make sure the sync is sent? */
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
        DCCP_SKB_CB(skb)->dccpd_seq = seq;

        dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

        skb = alloc_skb(sk->sk_prot->max_header, prio);
        if (skb == NULL)
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
                                        DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

        if (active) {
                /* Flush the write queue, then entail and send the CLOSE. */
                dccp_write_xmit(sk, 1);
                dccp_skb_entail(sk, skb);
                dccp_transmit_skb(sk, skb_clone(skb, prio));
                /* FIXME do we need a retransmit timer here? */
        } else
                dccp_transmit_skb(sk, skb);
}