diff -u -r -N ./net/Kconfig ./net/Kconfig
--- ./net/Kconfig	2007-12-26 08:10:12.000000000 +0100
+++ ./net/Kconfig	2007-12-26 08:08:10.000000000 +0100
@@ -32,6 +32,11 @@
 source "net/xfrm/Kconfig"
 source "net/iucv/Kconfig"
 
+config COR
+	bool "Connecion oriented routing"
+	---help---
+	  (no help avialable)
+
 config INET
 	bool "TCP/IP networking"
 	---help---
diff -u -r -N ./net/Makefile ./net/Makefile
--- ./net/Makefile	2007-12-26 08:10:11.000000000 +0100
+++ ./net/Makefile	2007-12-26 08:08:09.000000000 +0100
@@ -8,6 +8,7 @@
 obj-y	:= nonet.o
 
 obj-$(CONFIG_NET)		:= socket.o core/
+obj-$(CONFIG_COR)		+= cor/
 
 tmp-$(CONFIG_COMPAT) 		:= compat.o
 obj-$(CONFIG_NET)		+= $(tmp-y)
diff -u -r -N ./net/cor/Makefile ./net/cor/Makefile
--- ./net/cor/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ ./net/cor/Makefile	2008-01-01 15:57:59.000000000 +0100
@@ -0,0 +1 @@
+obj-y	 := common.o qdisc.o rcv.o snd.o sock.o kpacket_parse.o kpacket_gen.o cpacket_parse.o cpacket_gen.o
diff -u -r -N ./net/cor/common.c ./net/cor/common.c
--- ./net/cor/common.c	1970-01-01 01:00:00.000000000 +0100
+++ ./net/cor/common.c	2008-01-01 15:57:59.000000000 +0100
@@ -0,0 +1,239 @@
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "cor.h"
+
+DEFINE_MUTEX(cor_bindnodes);
+struct bindnode *choosenports;
+struct bindnode *randomports;
+
+
+struct cell_hdr{
+	spinlock_t lock;
+};
+
+
+/* size of the cell header, in units of sizeof(void *) */
+static inline int hdr_size(void)
+{
+	return (sizeof(struct cell_hdr) + sizeof(void *) - 1) / sizeof(void *);
+}
+
+static inline int elements_per_cell(int cell_size)
+{
+	return cell_size/sizeof(void *) - hdr_size();
+}
+
+static inline struct cell_hdr *cell_addr(struct htable *ht, __u32 id)
+{
+	int idx = (id%ht->htable_size) / (elements_per_cell(ht->cell_size));
+	return (struct cell_hdr *) (((char *)ht->htable) + ht->cell_size * idx);
+}
+
+static inline struct htab_entry **element_addr(struct htable *ht, __u32 id)
+{
+	int idx = (id%ht->htable_size) % (elements_per_cell(ht->cell_size));
+	/* hdr_size() is already in units of sizeof(void *) */
+	return ((struct htab_entry **)cell_addr(ht, id)) +
+			hdr_size() + idx;
+}
+
+/*
+ * if *element == 0 ==> search for the key and pop the element from the htable
+ * else ==> insert the element into the htable
+ *
+ * returns 1 if the element was found
+ */
+int search_htab(struct htable *ht, __u32 key, __u32 key_offset,
+		struct htab_entry **element)
+{
+	unsigned long iflags;
+
+	struct cell_hdr *hdr = cell_addr(ht, key);
+	struct htab_entry **curr = element_addr(ht, key);
+
+	BUG_TRAP(0 == key_offset%4);
+	BUG_TRAP(0 != curr);
+
+	spin_lock_irqsave( &(hdr->lock), iflags );
+
+	while (1) {
+		if (0 == *curr)
+			goto end;
+
+		if (((__u32 *)(*curr))[key_offset/4] == key)
+			goto found;
+
+		curr = &((*curr)->next);
+	}
+
+found:
+	if (0 == *element) {
+		*element = *curr;
+		*(curr) = (*curr)->next;
+	}
+	spin_unlock_irqrestore( &(hdr->lock), iflags );
+	return 1;
+end:
+	if (0 != *element) {
+		(*element)->next = (*curr)->next;
+		(*curr)->next = *element;
+	}
+	spin_unlock_irqrestore( &(hdr->lock), iflags );
+	return 0;
+}
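+
+/*
+ * Usage sketch (illustrative only - "struct example" and the surrounding
+ * names are made up). Elements embed a struct htab_entry as their first
+ * member and carry their key at key_offset bytes from the start:
+ *
+ *	struct example { struct htab_entry entry; __u32 id; } *e;
+ *
+ *	insert (e points to the new element):
+ *	search_htab(&tbl, e->id, offsetof(struct example, id),
+ *			(struct htab_entry **)&e);
+ *
+ *	lookup and pop (e must be 0 before the call):
+ *	search_htab(&tbl, id, offsetof(struct example, id),
+ *			(struct htab_entry **)&e);
+ */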
+
+
+void init_htable(struct htable *ht)
+{
+	int i;
+	int num_cells;
+
+	BUG_TRAP(0 != ht);
+	/* the slots have to start out empty */
+	ht->htable = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	ht->cell_size = 256;
+	num_cells = PAGE_SIZE/ht->cell_size;
+	ht->htable_size = num_cells * elements_per_cell(ht->cell_size);
+	ht->num_elements = 0;
+
+	for (i = 0; i < num_cells; i++) {
+		struct cell_hdr *hdr = (struct cell_hdr *)
+				(((char *) ht->htable) + ht->cell_size * i);
+		spin_lock_init( &(hdr->lock) );
+	}
+}
+
+
+static void bind_top(struct conn *conn, __u16 addrlen, __u8 *addr)
+{
+	BUG_TRAP(0 == conn->bind_node);
+}
+
+static void bind_node(struct conn *conn, __u16 addrlen, __u8 *addr)
+{
+	if (0 == conn->bind_node) {
+		#warning todo return error
+	}
+}
+
+void bind(struct conn *conn, __u16 addrlen, __u8 *addr)
+{
+	#warning todo bind to neighbor
+
+	mutex_lock(&cor_bindnodes);
+
+	switch (conn->targettype) {
+	case TARGET_UNBOUND:
+		bind_top(conn, addrlen, addr);
+		break;
+	case TARGET_RANDOMPORT:
+	case TARGET_CHOOSENPORT:
+		bind_node(conn, addrlen, addr);
+		break;
+	default:
+		BUG();
+	}
+
+	mutex_unlock(&cor_bindnodes);
+	/*struct binddir **curr;
+	unsigned long iflags;
+
+	if (0 == conn->bind_node) {
+		BUG_TRAP(conn->nextbindconn != 0 || conn->prevbindconn != 0);
+
+		spin_lock_irqsave( &(top.lock), iflags );
+		conn->bind_node = &top;
+		conn->nextbindconn = top.conns;
+		top.conns = conn;
+		spin_unlock_irqrestore( &(top.lock), iflags );
+	}
+
+	curr = &(conn->bind_node);
+
+	spin_lock_irqsave( &(conn->bind_node->lock), iflags );
+
+	while (*curr) {
+		if ((*curr)->addrlen == addrlen) {
+			if (0 == memcmp((*curr)->addr, addr, addrlen))
+				goto found;
+		}
+
+		curr = &((*curr)->next);
+	}
+
+	*curr = kmalloc(sizeof(struct binddir), GFP_KERNEL);
+	spin_lock_init( &((*curr)->lock) );
+	(*curr)->up = conn->bind_node;
+	(*curr)->subdirs = 0;
+	(*curr)->next = 0;
+	(*curr)->prev = (struct binddir *)
+			(((char *)*curr) + offsetof(struct binddir, next));
+	if ((*curr)->prev == (*curr)->up)
+		(*curr)->prev = 0;
+	(*curr)->owner = conn;
+	(*curr)->conns = 0;
+	(*curr)->addrlen = addrlen;
+	(*curr)->addr = kmalloc(addrlen, GFP_KERNEL);
+	memcpy((*curr)->addr, addr, addrlen);
+
+	goto out;
+found:
+	#warning todo review
+	if (0 != conn->prevbindconn) {
+		BUG_TRAP(conn->bind_node->conns == conn);
+		conn->prevbindconn->nextbindconn = conn->nextbindconn;
+		conn->nextbindconn->prevbindconn = conn->prevbindconn;
+	} else {
+		BUG_TRAP(conn->bind_node->conns != conn);
+		conn->bind_node->conns = conn->nextbindconn;
+	}
+	conn->prevbindconn = conn->nextbindconn = 0;
+
+	conn->bind_node = *curr;
+	conn->nextbindconn = (*curr)->conns;
+	(*curr)->conns = conn;
+
+out:
+	spin_unlock_irqrestore( &(conn->bind_node->lock), iflags );*/
+}
+
+int multicast(struct conn *conn)
+{
+	int ret;
+
+	BUG_TRAP(0 != conn);
+
+	mutex_lock(&cor_bindnodes);
+	if (conn == conn->bind_node->owner) {
+		conn->bindtype = BINDTYPE_MCAST;
+		ret = 0; /* placeholder until the CDR result codes are wired up */
+		#warning todo ret_ok
+	} else {
+		ret = -EPERM; /* placeholder */
+		#warning todo ret_error
+	}
+	mutex_unlock(&cor_bindnodes);
+	return ret;
+}
+
+int listen(struct conn *conn)
+{
+	int ret;
+
+	BUG_TRAP(conn != 0);
+
+	mutex_lock(&cor_bindnodes);
+	if (conn == conn->bind_node->owner) {
+		conn->bindtype = BINDTYPE_LISTEN;
+		ret = 0; /* placeholder until the CDR result codes are wired up */
+		#warning todo ret_ok
+	} else {
+		ret = -EPERM; /* placeholder */
+		#warning todo ret_error
+	}
+	mutex_unlock(&cor_bindnodes);
+	return ret;
+}
+
+int connect(struct conn *conn)
+{
+	#warning todo
+	return 0;
+}
+
+int connect_success(struct conn *conn)
+{
+	#warning todo
+	return 0;
+}
+
+void dec_connrefs(struct conn *conn)
+{
+	if (atomic_dec_and_test(&(conn->refs))) {
+		#warning todo free
+	}
+}
+
+MODULE_LICENSE("GPL");
diff -u -r -N ./net/cor/cor.h ./net/cor/cor.h
--- ./net/cor/cor.h	1970-01-01 01:00:00.000000000 +0100
+++ ./net/cor/cor.h	2008-01-01 15:57:59.000000000 +0100
@@ -0,0 +1,655 @@
+#ifndef COR_H
+#define COR_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+/* psched_time_t */
+#include <net/pkt_sched.h>
+
+
+#define ETH_P_COR 0x1022
+
+/*
+ * Kernel packet data - these commands are sent by the neighbor
+ * The end nodes may cause these commands to be sent, but they never see them
+ * beyond the first hop.
+ */
+
+/*
+ * speedinfo[2] =
+ *     buffer_state_value = speedinfo % 181
+ *     speed_value = speedinfo / 181
+ *
+ *     buffer_state = 1024 * pow(2, buffer_state_value/3.0)
+ *     speed = 1024 * pow(2, speed_value/12.0)
+ *   note the ".0" (these divisions are floating point divisions)
+ *
+ * This has to be done either with floating point (which is not so nice) or
+ * you can calculate:
+ *     buffer_state = pow(2, buffer_state_value/3) *
+ *         1024 * pow(pow(2, 1.0/3), buffer_state_value%3)
+ *   where 1024 * pow(pow(2, 1.0/3), buffer_state_value%3) can be just a
+ *   table lookup (the "1024" should be part of the value in the table,
+ *   because it increases the accuracy)
+ *
+ *   you can do the same with the speed
+ *
+ *
+ * Some values have special meanings:
+ *   if speedinfo is the highest possible value (65535), it means both values
+ *     are infinite
+ *   if buffer_state_value is > 91, you have to subtract 90 and make the
+ *     resulting buffer_state negative
+ */
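+
+/*
+ * Illustrative, integer-only decoding sketch for the buffer_state half of
+ * speedinfo, following the table lookup idea above. Nothing uses this yet;
+ * the rounded table values and the __s64 return type are assumptions, and
+ * the speedinfo == 65535 ("infinite") special case is left to the caller.
+ */
+#if 0
+static inline __s64 decode_buffer_state(__u16 speedinfo)
+{
+	/* 1024 * pow(2, (value%3)/3.0), precomputed and rounded */
+	static const __s64 tbl[3] = {1024, 1290, 1625};
+	__u32 value = speedinfo % 181;
+	int negative = 0;
+
+	if (value > 91) {
+		value -= 90;
+		negative = 1;
+	}
+
+	/* the shift covers the integer part of the exponent, tbl the rest */
+	if (negative)
+		return -(tbl[value % 3] << (value / 3));
+	return tbl[value % 3] << (value / 3);
+}
+#endif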
+
+
+/*
+ * packet_id_definition =
+ *   packet_id_def_count[2] (only on (N)ACK)
+ *   num_new_packet_ids[1]
+ *   new_packet_id[num_new_packet_ids][4]
+ *
+ * packet_id_def_count = counter of the definitions of this connection
+ *   this is necessary to ensure that the other side receives the packet ids
+ *   in the correct order
+ */
+
+/* PADDING[1] */
+#define KP_PADDING 1
+
+/* ACK1[1] sent_packet_id[4] */
+#define KP_ACK1 2
+
+/* ACK2[1] sent_packet_id[4] speedinfo[2] */
+#define KP_ACK2 3
+
+/* ACK3[1] sent_packet_id[4] packet_id_definition */
+#define KP_ACK3 4
+
+/* ACK4[1] sent_packet_id[4] speedinfo[2], packet_id_definition */
+#define KP_ACK4 5
+
+/* see ACK[1-4] */
+#define KP_NACK1 6
+#define KP_NACK2 7
+#define KP_NACK3 8
+#define KP_NACK4 9
+
+/*
+ * incoming connection, the packet_id_definition is our send channel
+ * CONNECT[1] packet_id_definition
+ */
+#define KP_CONNECT 10
+
+/*
+ * incoming connection successful, packet_id_definition is our receive channel
+ * CONNECT_SUCCESS[1] packet_id[4] packet_id_definition
+ */
+#define KP_CONNECT_SUCCESS 11
+
+/* CONNECT_FAILED_*[1] packet_id[4] */
+#define KP_CONNECT_FAILED_TEMP 12
+#define KP_CONNECT_FAILED_PERM 13
+
+/*
+ * FLUSH_CONN_BUF[1] packet_id[4] offset[2]
+ * (flush until offset bytes after the beginning of the packet)
+ */
+#define KP_FLUSH_CONN_BUF 14
+
+/* CONN_DATA[1] packet_id[4] length[2] data[length] */
+#define KP_CONN_DATA 15
+
+/* CONN_EOF[1] packet_id[4] */
+#define KP_CONN_EOF 16
+
+/*
+ * { CONN_RESET_FW[1] packet_id[4] }
+ * We send this, if there is an established connection we want to close.
+ */
+#define KP_CONN_RESET_FW 17
+
+/*
+ * { CONN_RESET_BW[1] sent_packet_id[4] }
+ * This is the response we send when we receive a packet with an invalid
+ * packet_id. Note that on reception the packet_id needs to be looked up
+ * in the list of packet_ids we sent to this neighbor, not in the packet_id
+ * list for routing the packet.
+ */
+#define KP_CONN_RESET_BW 18
+
+
+
+/*
+ * Connection data which is interpreted when the connection has no target yet
+ * These commands are sent by the end node.
+ *
+ * Format:
+ * length[4] cmd[2] parameter[length]
+ * unrecognized commands are ignored
+ * parameters which are longer than expected are ignored as well
+ */
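+
+/*
+ * Sketch of decoding the 6 byte header described above (big endian on the
+ * wire). Illustrative only - it assumes the whole header is available
+ * contiguously, which read1() in cpacket_parse.c does not require:
+ */
+#if 0
+static inline void cd_parse_hdr(const __u8 *hdr, __u32 *length, __u16 *cmd)
+{
+	*length = (((__u32)hdr[0]) << 24) | (((__u32)hdr[1]) << 16) |
+			(((__u32)hdr[2]) << 8) | hdr[3];
+	*cmd = (((__u16)hdr[4]) << 8) | hdr[5];
+}
+#endif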
+
+/* CD_PADDING[2] SOME_JUNK[length] */
+#define CD_PADDING 1
+
+/* CD_BIND[2] numbinds[1] {addrlen[2] addr[addrlen]}[numbinds] */
+#define CD_BIND 2
+
+/* CD_BIND_MCAST[2] */
+#define CD_BIND_MCAST 3
+
+/* CD_BIND_LISTEN[2] */
+#define CD_BIND_LISTEN 4
+
+/* outgoing connection: CD_BIND_CONNECT[2] */
+#define CD_BIND_CONNECT 5
+
+/*
+ * Connection data response
+ * Format is the same as with connection data
+ */
+
+/* CDR_OK[2] */
+#define CDR_OK 1
+
+/*
+ * CDR_OK_PARAMETER_IGNORED[2] first_ignored_parameter_offset[4]
+ * The command was executed successfully, but some parameters were not
+ * expected and have been ignored. The first_ignored_parameter_offset says how
+ * many bytes of the parameters were expected and parsed.
+ */
+#define CDR_OK_PARAMETER_IGNORED 2
+
+/*
+ * CDR_EXECUTION_FAILED[2] reasoncode[2] reasontextlength[2]
+ * reasontext[reasontextlength]
+ */
+#define CDR_EXECUTION_FAILED 3
+
+#define CDR_EXECFAILEDREASON_UNKNOWN_COMMAND 1
+#define CDR_EXECFAILEDREASON_ADDRESS_ALREADY_IN_USE 2
+#define CDR_EXECFAILEDREASON_PERMISSION_DENIED 3
+
+
+/* result codes for rcv.c/proc_packet */
+#define RC_DROP 0
+#define RC_FINISHED 1
+
+#define RC_RCV1_ANNOUNCE 2
+#define RC_RCV1_KERNEL 3
+#define RC_RCV1_CONN 4
+
+struct htab_entry{
+	struct htab_entry *next;
+};
+
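+/*
+ * One page, split into cells of cell_size bytes; each cell starts with a
+ * struct cell_hdr (one lock per cell, see common.c) followed by htab_entry
+ * pointer slots. htable_size is the total number of slots.
+ */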
+struct htable{
+	struct htab_entry **htable;
+	__u32 htable_size;
+	__u32 cell_size;
+	__u32 num_elements;
+};
+
+struct conn;
+
+#warning todo
+#define MSGTYPE_BINDMCAST 3
+#define MSGTYPE_BINDLISTEN 4
+#define MSGTYPE_BINDCONNECT 5
+struct conn_data_msg{
+	union{
+		/* bind_*: packet_id has to be first (see packet_gen.c) */
+		struct{
+			__u32 packet_id;
+			struct conn *conn;
+		}bind_mcast;
+
+		struct{
+			__u32 packet_id;
+			struct conn *conn;
+		}bind_listen;
+
+		struct{
+			__u32 packet_id;
+			struct conn *conn;
+		}bind_connect;
+	}msg;
+};
+
+/*case BIND:
+case BIND_MCAST:
+case BIND_LISTEN:
+case BIND_CONNECT:
+case FLUSH_CONN_BUF:
+case CONN_DATA:
+case CONN_EOF:*/
+struct control_msg_in{
+	__u8 type;
+
+	union{
+		struct {
+		}bind;
+
+		struct {
+		}bind_mcast;
+
+		struct {
+		}bind_listen;
+
+		struct {
+		}bind_connect;
+
+		struct {
+		}flush_buf;
+
+		struct {
+		}eof;
+	}msg;
+};
+
+/* not sent over the network - internal meaning only */
+#define MSGTYPE_ACK 1
+#define MSGTYPE_CONNECT 2
+#define MSGTYPE_FLUSH_BUF 3
+#define MSGTYPE_EOF 4
+#define MSGTYPE_RESET 5
+#define MSGTYPE_RESET_BW 6
+
+struct control_msg_out{
+	struct neighbor *nb;
+	struct control_msg_out *next;
+	__u8 type;
+	union{
+		struct{
+			__u32 packet_id;
+			struct conn *conn;
+		}ack;
+
+		struct{
+			struct conn *conn;
+		}connect;
+
+		struct{
+			__u16 offset;
+			__u32 packet_id;
+			struct conn *conn;
+		}flush_buf;
+
+		struct{
+			struct conn *conn;
+		}eof;
+
+		struct{
+			__u32 packet_id;
+			struct conn *conn;
+		}reset;
+
+		struct{
+			__u32 sent_packet_id;
+		}reset_bw;
+	}msg;
+	__u32 timedue;
+};
+
+struct packet_id_in{
+	struct htab_entry entry;
+	struct conn *conn;
+	struct packet_id_in *next;
+
+	union{
+		struct sk_buff *skb;
+		struct control_msg_in *cm;
+	} rcv;
+#define PIDIN_SKB 0
+#define PIDIN_CM 1
+	__u8 type:1;
+
+	__u32 packet_id;
+
+};
+
+struct packet_id_out{
+	struct packet_id_out *next;
+	__u32 num_ids;
+	__u32 offset;
+	/* __u32 packet_ids[num_ids] attached */
+};
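+
+/*
+ * The packet ids live directly behind the struct; an access sketch matching
+ * the arithmetic in pop_pid() in snd.c (illustrative only):
+ */
+#if 0
+static inline __u32 *packet_id_out_ids(struct packet_id_out *p)
+{
+	return (__u32 *) (((char *) p) +
+			((sizeof(struct packet_id_out) + 3) / 4) * 4);
+}
+#endif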
+
+struct neighbor{
+	struct neighbor *next;
+	struct neighbor *prev;
+
+	struct net_device *dev;
+	char mac[6];
+
+	#warning semaphore/mutex?, TODO:timer
+	struct timer_list cmsg_timer;
+	spinlock_t cmsg_lock;
+	struct control_msg_out *first_cm;
+	struct control_msg_out *last_cm;
+	__u32 timedue;
+	__u32 length;
+
+	__u32 ooo_packets;
+
+	__u32 latency;
+
+	/*
+	 * connections which receive data from/send data to this node
+	 * used when terminating all connections of a neighbor
+	 */
+	struct conn *rcv;
+	struct conn *snd;
+
+	/*
+	 * the timer has to be initialized when adding the neighbor
+	 * init_timer(struct timer_list * timer);
+	 * add_timer(struct timer_list * timer);
+	 */
+	spinlock_t retrans_lock;
+	struct timer_list retrans_timer;
+
+	/*
+	 * next_retransmit are linked with
+	 * skb_procstate->funcstate.retransmit_queue
+	 * because the sk_buff next/prev fields are needed by the hashtable
+	 */
+	struct sk_buff *first_retransmit;
+	struct sk_buff *last_retransmit;
+	struct htable all_retransmits;
+};
+
+
+
+struct conn_qdisc_info;
+
+/*
+ * When deleting an element from the list, we can simply do
+ * el->next->prev=el->prev; el->prev->next=el->next;
+ * This is because the struct conn_qdisc_list is just another element in the
+ * list. If the list is empty, both "first" and "last" point to itself.
+ */
+struct conn_qdisc_list{
+	struct conn_qdisc_info *first;
+	struct conn_qdisc_info *last;
+};
+
+struct conn_qdisc_listelement{
+	struct conn_qdisc_info *next;
+	struct conn_qdisc_info *prev;
+};
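+
+/*
+ * Illustrative helpers for the scheme described above (nothing uses these
+ * yet; the names are made up). The casts work because the listelement is
+ * the first member of struct conn_qdisc_info and the list head has the
+ * same layout:
+ */
+#if 0
+static inline void cql_unlink(struct conn_qdisc_info *el)
+{
+	el->le.next->le.prev = el->le.prev;
+	el->le.prev->le.next = el->le.next;
+}
+
+static inline void cql_insert_tail(struct conn_qdisc_list *l,
+		struct conn_qdisc_info *el)
+{
+	el->le.next = (struct conn_qdisc_info *) l;
+	el->le.prev = l->last;
+	l->last->le.next = el; /* writes l->first if the list was empty */
+	l->last = el;
+}
+#endif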
+
+struct cor_sched_data{
+	/* This struct has to be first! See cor.h */
+	struct conn_qdisc_list cql;
+
+	spinlock_t lock;
+};
+
+/* see qdisc.c */
+struct conn_qdisc_info{
+	/* This has to be first! */
+	struct conn_qdisc_listelement le;
+
+	spinlock_t lock;
+
+	struct sk_buff_head queue;
+
+	psched_time_t last_rcv_time;
+	psched_time_t last_snd_time;
+//	PSCHED_GET_TIME(now);
+
+	__u32 bytes_queued;
+	__u32 bytes_queued_avg;
+
+	/* state */
+	__u32 credits;
+
+	/* per second */
+	__u32 credit_in;
+	__u32 credit_out;
+
+	/* credit status of the sender/recp (for balancing credits)*/
+	__u32 credit_sender;
+	__u32 credit_recp;
+
+	__u32 avg_snd_cost;
+
+	__u32 avg_rate;
+
+	__u8 	credit_flowdirection:2,
+		qdisc_active:1;
+};
+
+/* these are the not yet sent, not yet resized packets */
+struct resize_buf{
+	spinlock_t lock;
+	__u32 totalsize;
+	struct sk_buff *skb_first;
+	struct sk_buff *skb_last;
+};
+
+struct conn{
+	/* when zero we can free this struct */
+	atomic_t refs;
+	// = ATOMIC_INIT(0);
+
+
+#define SOURCE_CONN 1
+#define SOURCE_IN 2
+#define SOURCE_SOCK 3
+
+
+#define TARGET_UNBOUND 1
+#define TARGET_RANDOMPORT 2
+#define TARGET_CHOOSENPORT 3
+
+#define TARGET_MULTIPLE 4
+#define TARGET_OUT 5
+#define TARGET_SOCK 6
+#define TARGET_KERNEL 7
+	__u8	sourcetype:4,
+		targettype:4;
+
+
+#define BINDTYPE_BIND 0
+#define BINDTYPE_LISTEN 1
+#define BINDTYPE_MCAST 2
+#define BINDTYPE_CONNECTED 3
+	__u8	bindtype;
+
+	struct bindnode *bind_node;
+	struct conn *nextbindconn;
+	struct conn *prevbindconn;
+
+	union{
+		/* from another conn with targettype out_multiple */
+		struct{
+			struct conn *src;
+			/* "linked" list of multiple targets */
+			struct conn *next;
+			struct conn *prev;
+		}conn;
+
+		struct{
+			struct neighbor *nb;
+			/* list of all connections from this neighbor */
+			struct conn *next;
+			struct conn *prev;
+
+			struct packet_id_in *next_packet_id;
+
+			spinlock_t reorder_lock;
+
+			__u32 ooo_packets;
+		}in;
+
+		struct{
+			wait_queue_head_t *wait;
+			struct sk_buff_head queue;
+		}sock;
+	}source;
+
+	union{
+		/* TARGET_UNBOUND, TARGET_RANDOMPORT, TARGET_CHOOSENPORT */
+		struct{
+			__u32 paramlength;
+			__u32 cmdread;
+			__u16 cmd;
+			__u8 *cmdparams;
+		}unconnected;
+
+		struct{
+			struct conn *firsttarget;
+			__u32 numtargets;
+		}multiple;
+
+		struct{
+			/* has to be first (must overlay target.kernel.nb) */
+			struct neighbor *nb;
+			/* list of all connections to this neighbor */
+			struct conn *next;
+			struct conn *prev;
+
+			spinlock_t pid_lock;
+			struct packet_id_out *next_pid;
+			struct packet_id_out *last_pid;
+			/* see packet id definition */
+			__u16 last_packet_id_cnt;
+			__u8 packet_id_sent:1;
+
+			struct resize_buf rb;
+			struct conn_qdisc_info qi;
+		}out;
+
+		struct{
+			struct sk_buff_head *queue;
+			wait_queue_head_t *wait;
+			spinlock_t lock;
+		}sock;
+
+		struct{
+			struct neighbor *nb;
+		}kernel;
+	}target;
+
+	struct conn *reverstdir;
+};
+
+struct bindnode{
+	struct bindnode *next;
+	struct bindnode *prev;
+
+	struct conn *owner;
+
+	struct conn *firstboundconn;
+
+	__u16 addrtype;
+
+	__u16 addrlen;
+	__u8 *addr;
+};
+
+/* inside skb->cb */
+struct skb_procstate{
+
+	struct conn *conn;
+
+	/**
+	 * old???
+	 * Packets generated from resizing:
+	 * packet_id packet_id: the first packet used to assemble this packet
+	 * next_packet_id: the first received packet which is *not* fully stored
+	 *   in this skb (maybe it isn't stored in this packet at all)
+	 */
+
+	union{
+		struct{
+			struct sk_buff *next;
+			struct sk_buff *prev;
+			__u32 timeout;
+			__u32 sent_pid;
+		}retransmit_queue;
+
+		struct{
+			__u32 credits_used;/* for requeueing */
+		}out;
+	}funcstate;
+};
+
+
+/* common.c */
+extern int search_htab(struct htable *ht, __u32 key, __u32 key_offset,
+		struct htab_entry **element);
+
+extern void init_htable(struct htable *ht);
+
+extern void bind(struct conn *conn, __u16 addrlen, __u8 *addr);
+
+extern int multicast(struct conn *conn);
+
+extern int listen(struct conn *conn);
+
+extern int connect(struct conn *conn);
+
+extern int connect_success(struct conn *conn);
+
+extern void dec_connrefs(struct conn *conn);
+
+/* rcv.c */
+extern int put_packet_id_def(struct sk_buff *skb, struct conn *conn,
+		int spaceleft);
+
+/* kpacket_parse.c */
+extern void kernel_packet(struct sk_buff *skb, __u32 packet_id);
+
+/* kpacket_gen.c */
+extern void flush(struct conn *conn, __u16 offset, __u32 timeout);
+
+extern void eof(struct conn *conn, __u32 timeout);
+
+extern void reset(struct conn *conn);
+
+extern void reset_bw(struct neighbor *nb, __u32 packet_id);
+
+extern void send_ack(struct neighbor *nb, __u32 packet_id);
+
+/* snd.c */
+extern struct sk_buff * create_packet(struct conn *target, int size,
+		int alloc_flags);
+
+extern void send_packet(struct sk_buff *skb);
+
+extern struct conn *ack_received(struct neighbor *nb, __u32 packet_id,
+		int nack);
+
+extern struct conn *nack_received(struct neighbor *nb, __u32 packet_id);
+
+
+
+static inline struct skb_procstate * skb_pstate(struct sk_buff **skb)
+{
+	return (struct skb_procstate *) &((*skb)->cb[0]);
+}
+
+
+static inline __u32 mss(struct neighbor *nb)
+{
+	return nb->dev->mtu - LL_RESERVED_SPACE(nb->dev) - 4;
+}
+
+
+extern struct htable pid_table;
+
+static inline struct packet_id_in * get_packet_id(__u32 packet_id)
+{
+	struct packet_id_in *ret = 0;
+	__u32 offset = offsetof(struct packet_id_in, packet_id);
+
+	search_htab(&pid_table, packet_id, offset, (struct htab_entry **)&ret);
+
+	return ret;
+}
+
+#endif
diff -u -r -N ./net/cor/cpacket_gen.c ./net/cor/cpacket_gen.c
--- ./net/cor/cpacket_gen.c	1970-01-01 01:00:00.000000000 +0100
+++ ./net/cor/cpacket_gen.c	2008-01-01 15:57:59.000000000 +0100
@@ -0,0 +1,26 @@
+#include "cor.h"
+
+#warning todo
+/* takes the skb to append to (restores the commented-out skb_put) */
+int add_bind(struct sk_buff *skb, __u8 type)
+{
+	char *dst = skb_put(skb, 1);
+
+	BUG_TRAP(0 != dst);
+
+	switch (type) {
+	case MSGTYPE_BINDMCAST:
+		dst[0] = CD_BIND_MCAST;
+		break;
+	case MSGTYPE_BINDLISTEN:
+		dst[0] = CD_BIND_LISTEN;
+		break;
+	case MSGTYPE_BINDCONNECT:
+		dst[0] = CD_BIND_CONNECT;
+		break;
+	default:
+		BUG();
+	}
+
+	/* number of bytes added */
+	return 1;
+}
diff -u -r -N ./net/cor/cpacket_parse.c ./net/cor/cpacket_parse.c
--- ./net/cor/cpacket_parse.c	1970-01-01 01:00:00.000000000 +0100
+++ ./net/cor/cpacket_parse.c	2008-01-01 15:57:59.000000000 +0100
@@ -0,0 +1,71 @@
+#include "cor.h"
+
+static void parse_cmd(struct sk_buff *skb, struct conn *conn, __u8 code)
+{
+	if (code == CD_BIND) {
+		parse_bind(skb, conn);
+	} else if (code == CD_BIND_MCAST) {
+		multicast(conn);
+	} else if (code == CD_BIND_LISTEN) {
+		listen(conn);
+	} else if (code == CD_BIND_CONNECT) {
+		connect(conn);
+	} else {
+		#warning todo ret error
+	}
+}
+
+static void read2(struct sk_buff *skb, struct conn *conn)
+{
+}
+
+static void read1(struct sk_buff *skb, struct conn *conn)
+{
+	char *data;
+	int pull;
+	int readoffset = 0;
+
+	if (6 <= conn->target.unconnected.cmdread)
+		return;
+
+	BUG_TRAP(0 == conn->target.unconnected.cmdparams);
+
+	pull = min_t(int, 6 - conn->target.unconnected.cmdread, skb->len);
+	/* read before pulling - skb_pull returns the pointer *after* the
+	 * pulled data */
+	data = skb->data;
+	skb_pull(skb, pull);
+
+	/*
+	 * header format: length[4] cmd[2], big endian; paramlength and cmd
+	 * are assumed to start out as 0
+	 */
+	while (readoffset < pull) {
+		int pos = conn->target.unconnected.cmdread + readoffset;
+
+		if (pos < 4) {
+			conn->target.unconnected.paramlength <<= 8;
+			conn->target.unconnected.paramlength +=
+					((__u8 *)data)[readoffset];
+		} else {
+			conn->target.unconnected.cmd <<= 8;
+			conn->target.unconnected.cmd +=
+					((__u8 *)data)[readoffset];
+		}
+		readoffset++;
+	}
+
+	conn->target.unconnected.cmdread += pull;
+}
+
+void parse(struct sk_buff *skb, struct conn *conn)
+{
+	BUG_TRAP(conn->targettype == TARGET_UNBOUND ||
+			conn->targettype == TARGET_RANDOMPORT ||
+			conn->targettype == TARGET_CHOOSENPORT);
+
+	read1(skb, conn);
+	read2(skb, conn);
+
+	#warning todo
+}
diff -u -r -N ./net/cor/kpacket_gen.c ./net/cor/kpacket_gen.c
--- ./net/cor/kpacket_gen.c	1970-01-01 01:00:00.000000000 +0100
+++ ./net/cor/kpacket_gen.c	2008-01-01 15:57:59.000000000 +0100
@@ -0,0 +1,274 @@
+#include "cor.h"
+#include "settings.h"
+
+
+static int add_ack(struct sk_buff *skb, struct control_msg_out *cm,
+		int spaceleft)
+{
+	int rc;
+
+	char *dst;
+
+	char *pid = (char *) &(cm->msg.ack.packet_id);
+
+	if (spaceleft < 5)
+		return 0;
+
+	dst = skb_put(skb, 5);
+	BUG_TRAP(0 != dst);
+
+	rc = put_packet_id_def(skb, cm->msg.ack.conn, spaceleft - 5);
+
+	dst[0] = rc == 0 ? KP_ACK1 : KP_ACK3;
+	dst[1] = pid[0];
+	dst[2] = pid[1];
+	dst[3] = pid[2];
+	dst[4] = pid[3];
+
+	return 5 + rc;
+}
+
+static int add_connect(struct sk_buff *skb, struct control_msg_out *cm,
+		int spaceleft)
+{
+	int rc;
+
+	char *dst;
+
+	if (spaceleft < 2 + PACKET_IDS_ON_CONNECT * 4)
+		return 0;
+
+	dst = skb_put(skb, 1);
+	BUG_TRAP(0 != dst);
+
+	dst[0] = KP_CONNECT;
+
+	rc = put_packet_id_def(skb, cm->msg.connect.conn, spaceleft - 1);
+
+	return 1 + rc;
+}
+
+static int add_flush_buf(struct sk_buff *skb, struct control_msg_out *cm,
+		int spaceleft)
+{
+	char *dst;
+
+	char *pid = (char *) &(cm->msg.flush_buf.packet_id);
+	__u16 offset = htons(cm->msg.flush_buf.offset);
+
+	if (spaceleft < 7)
+		return 0;
+
+	dst = skb_put(skb, 7);
+	BUG_TRAP(0 != dst);
+
+	dst[0] = KP_FLUSH_CONN_BUF;
+	dst[1] = pid[0];
+	dst[2] = pid[1];
+	dst[3] = pid[2];
+	dst[4] = pid[3];
+	dst[5] = ((char *)(&offset))[0];
+	dst[6] = ((char *)(&offset))[1];
+
+	return 7;
+}
+
+static int add_eof(struct sk_buff *skb, struct control_msg_out *cm,
+		int spaceleft)
+{
+	#warning todo
+	return 0;
+}
+
+static int add_reset(struct sk_buff *skb, struct control_msg_out *cm,
+		int spaceleft)
+{
+	char *dst;
+
+	char *pid = (char *) &(cm->msg.reset.packet_id);
+
+	if (spaceleft < 5)
+		return 0;
+
+	dst = skb_put(skb, 5);
+	BUG_TRAP(0 != dst);
+
+	dst[0] = KP_CONN_RESET_FW;
+	dst[1] = pid[0];
+	dst[2] = pid[1];
+	dst[3] = pid[2];
+	dst[4] = pid[3];
+
+	return 5;
+}
+
+static int add_reset_bw(struct sk_buff *skb, struct control_msg_out *cm,
+		int spaceleft)
+{
+	char *dst;
+
+	char *pid = (char *) &(cm->msg.reset_bw.sent_packet_id);
+
+	if (spaceleft < 5)
+		return 0;
+
+	dst = skb_put(skb, 5);
+	BUG_TRAP(0 != dst);
+
+	dst[0] = KP_CONN_RESET_BW;
+	dst[1] = pid[0];
+	dst[2] = pid[1];
+	dst[3] = pid[2];
+	dst[4] = pid[3];
+
+	return 5;
+}
+
+
+static int add_message(struct sk_buff *skb, struct control_msg_out *cm,
+		int spaceleft)
+{
+	switch (cm->type) {
+	case MSGTYPE_ACK:
+		return add_ack(skb, cm, spaceleft);
+	case MSGTYPE_CONNECT:
+		return add_connect(skb, cm, spaceleft);
+	case MSGTYPE_FLUSH_BUF:
+		return add_flush_buf(skb, cm, spaceleft);
+	case MSGTYPE_EOF:
+		return add_eof(skb, cm, spaceleft);
+	case MSGTYPE_RESET:
+		return add_reset(skb, cm, spaceleft);
+	case MSGTYPE_RESET_BW:
+		return add_reset_bw(skb, cm, spaceleft);
+	default:
+		BUG();
+	}
+}
+
+static void padding(struct sk_buff *skb, int length)
+{
+	char *dst = skb_put(skb, length);
+	BUG_TRAP(0 != dst);
+	memset(dst, KP_PADDING, length);
+}
+
+static void send_messages(struct neighbor *nb, struct sk_buff *skb,
+		int spaceleft)
+{
+	int length = 0;
+	struct control_msg_out *old;
+
+	while (0 != nb->first_cm) {
+		int rc = add_message(skb, nb->first_cm, spaceleft - length);
+		if (0 == rc)
+			goto send_packet;
+		length += rc;
+		old = nb->first_cm;
+		nb->first_cm = nb->first_cm->next;
+		kfree(old);
+	}
+
+	nb->last_cm = 0;
+send_packet:
+	nb->length -= length;
+
+	padding(skb, spaceleft - length);
+	send_packet(skb);
+}
+
+static void controlmsg_timerfunc(unsigned long arg)
+{
+	unsigned long iflags;
+
+	struct neighbor *nb = (struct neighbor *) arg;
+
+	int targetmss = mss(nb);
+	int size = targetmss;
+
+	struct sk_buff *skb = 0;
+	struct control_msg_out *curr;
+
+	int sent = 0;
+
+	spin_lock_irqsave( &(nb->cmsg_lock), iflags );
+
+	while (nb->length >= targetmss) {
+send:
+		sent = 1;
+		/* skb = create_packet */
+		send_messages(nb, skb, size);
+	}
+
+	curr = nb->first_cm;
+
+	while (1 == sent && 0 != curr) {
+		if (time_before_eq(curr->timedue, jiffies))
+			goto send;
+		else if (time_before(curr->timedue, nb->timedue))
+			nb->timedue = curr->timedue;
+
+		curr = curr->next;
+	}
+
+	spin_unlock_irqrestore( &(nb->cmsg_lock), iflags );
+
+	mod_timer(&(nb->cmsg_timer), nb->timedue);
+}
+
+static void add_control_msg(struct control_msg_out *msg, __u32 length,
+		struct neighbor *nb)
+{
+	unsigned long iflags;
+	spin_lock_irqsave( &(nb->cmsg_lock), iflags );
+
+	if (msg == 0)
+		goto length;
+
+	if (unlikely(0 == nb->first_cm)) {
+		BUG_TRAP(0 == nb->last_cm);
+		nb->first_cm = nb->last_cm = msg;
+	} else {
+		nb->last_cm->next = msg;
+		nb->last_cm = msg;
+	}
+
+	if (unlikely(time_before(msg->timedue, nb->timedue))) {
+		nb->timedue = msg->timedue;
+		mod_timer(&(nb->cmsg_timer), nb->timedue);
+	}
+
+length:
+	nb->length += length;
+
+	spin_unlock_irqrestore( &(nb->cmsg_lock), iflags );
+
+	/* controlmsg_timerfunc takes cmsg_lock itself, so call it only after
+	 * unlocking */
+	if (unlikely(nb->length >= mss(nb)))
+		controlmsg_timerfunc((unsigned long) nb);
+}
+
+
+void flush(struct conn *conn, __u16 offset, __u32 timeout)
+{
+
+}
+
+void eof(struct conn *conn, __u32 timeout)
+{
+
+}
+
+void reset(struct conn *conn)
+{
+	struct control_msg_out *cm = kmalloc(sizeof(struct control_msg_out),
+			GFP_KERNEL);
+	cm->type = MSGTYPE_RESET;
+	cm->msg.reset.conn = conn;
+	#warning todo pid, send
+}
+
+void reset_bw(struct neighbor *nb, __u32 packet_id)
+{
+}
+
+void send_ack(struct neighbor *nb, __u32 packet_id)
+{
+}
diff -u -r -N ./net/cor/kpacket_parse.c ./net/cor/kpacket_parse.c
--- ./net/cor/kpacket_parse.c	1970-01-01 01:00:00.000000000 +0100
+++ ./net/cor/kpacket_parse.c	2008-01-01 15:57:59.000000000 +0100
@@ -0,0 +1,274 @@
+#include "cor.h"
+
+static __u32 pull_u32(struct sk_buff *skb)
+{
+	/* read before pulling - skb_pull returns the pointer *after* the
+	 * pulled data */
+	char *ptr = skb->data;
+
+	__u32 ret = 0;
+
+	BUG_TRAP(skb->len >= 4);
+	skb_pull(skb, 4);
+
+	((char *)&ret)[0] = ptr[0];
+	((char *)&ret)[1] = ptr[1];
+	((char *)&ret)[2] = ptr[2];
+	((char *)&ret)[3] = ptr[3];
+
+	return ret;
+}
+
+static __u16 pull_u16(struct sk_buff *skb)
+{
+	char *ptr = skb->data;
+
+	__u16 ret = 0;
+
+	BUG_TRAP(skb->len >= 2);
+	skb_pull(skb, 2);
+
+	((char *)&ret)[0] = ptr[0];
+	((char *)&ret)[1] = ptr[1];
+
+	return ret;
+}
+
+static void set_speed(struct conn *conn, __u16 speedinfo)
+{
+}
+
+static void add_packet_ids(struct conn *conn, struct sk_buff *skb)
+{
+}
+
+static void parse_ack(struct neighbor *nb, struct sk_buff *skb, __u8 code)
+{
+	struct conn *conn;
+	__u32 packet_id = pull_u32(skb);
+
+	if (likely(code == KP_ACK1 || code == KP_ACK2 || code == KP_ACK3 ||
+			code == KP_ACK4)) {
+		conn = ack_received(nb, packet_id, 0);
+	} else if (code == KP_NACK1 || code == KP_NACK2 || code == KP_NACK3 ||
+			code == KP_NACK4) {
+		conn = ack_received(nb, packet_id, 1);
+	} else {
+		BUG();
+	}
+
+	if (unlikely(code == KP_ACK2 || code == KP_ACK4 || code == KP_NACK2 ||
+			code == KP_NACK4)) {
+		__u16 speedinfo = ntohs(pull_u16(skb));
+		set_speed(conn, speedinfo);
+	}
+
+	if (unlikely(code == KP_ACK3 || code == KP_ACK4 || code == KP_NACK3 ||
+			code == KP_NACK4)) {
+		add_packet_ids(conn, skb);
+	}
+}
+
+static void parse_bind(struct sk_buff *skb, struct conn *conn)
+{
+	/* read before pulling - skb_pull returns the pointer *after* the
+	 * pulled data */
+	__u8 num_binds = skb->data[0];
+
+	skb_pull(skb, 1);
+	for (; num_binds > 0; num_binds--) {
+		__u16 addrlen = ntohs(pull_u16(skb));
+		__u8 *addr = skb->data;
+
+		BUG_TRAP(skb->len >= addrlen);
+		skb_pull(skb, addrlen);
+		bind(conn, addrlen, addr);
+	}
+}
+
+
+void kernel_packet2(struct sk_buff *skb, __u32 packet_id)
+{
+	struct skb_procstate *ps = skb_pstate(&skb);
+	struct neighbor *nb = ps->conn->target.kernel.nb;
+	int ack = 0;
+	while (skb->len > 0) {
+		__u32 conn_packet_id;
+		struct packet_id_in *pid;
+
+		/* read before pulling - skb_pull returns the pointer *after*
+		 * the pulled data */
+		__u8 code = skb->data[0];
+		skb_pull(skb, 1);
+
+		switch (code) {
+		case KP_PADDING:
+			break;
+		case KP_ACK1:
+		case KP_ACK2:
+		case KP_ACK3:
+		case KP_ACK4:
+		case KP_NACK1:
+		case KP_NACK2:
+		case KP_NACK3:
+		case KP_NACK4:
+			parse_ack(nb, skb, code);
+			break;
+		case KP_CONNECT:
+			#warning todo create conn/add_packet_ids
+			break;
+		case KP_CONNECT_SUCCESS:
+			#warning todo create reverse dir connection
+			break;
+		case KP_CONNECT_FAILED_TEMP:
+		case KP_CONNECT_FAILED_PERM:
+			#warning todo
+			break;
+		case KP_FLUSH_CONN_BUF:
+		case KP_CONN_DATA:
+		case KP_CONN_EOF:
+		case KP_CONN_RESET_FW:
+			#warning todo is next packet?
+			#warning todo move to next packet
+			conn_packet_id = pull_u32(skb);
+			pid = get_packet_id(conn_packet_id);
+			if (code == KP_FLUSH_CONN_BUF) {
+				#warning todo offset, timeout
+				flush(pid->conn, 0, 10);
+			} else if (code == KP_CONN_DATA) {
+				#warning todo
+				/*skb_trim(skb, len);*/
+			} else if (code == KP_CONN_EOF) {
+				#warning todo timeout
+				eof(pid->conn, 10);
+			} else {
+				reset(pid->conn);
+			}
+			break;
+		case KP_CONN_RESET_BW:
+			conn_packet_id = pull_u32(skb);
+			reset_bw(nb, conn_packet_id);
+			break;
+		default:
+			#warning todo
+			BUG();
+		}
+	}
+
+	if (ack)
+		send_ack(nb, packet_id);
+}
+
+static int bind_length(struct sk_buff *skb2)
+{
+	int u;
+	__u8 num_binds;
+
+	if (skb2->len < 1)
+		return 1;
+	num_binds = skb2->data[0];
+	skb_pull(skb2, 1);
+	for (u = 0; u < num_binds; u++) {
+		__u16 len;
+
+		if (skb2->len < 2)
+			return 1;
+		len = (((__u16)skb2->data[0]) << 8) | skb2->data[1];
+		skb_pull(skb2, 2);
+		if (skb_pull(skb2, len) == 0)
+			return 1;
+	}
+	return 0;
+}
+
+static int packet_id_def_length(struct sk_buff *skb2)
+{
+	__u8 num_ids;
+
+	if (skb2->len < 1)
+		return 1;
+	num_ids = skb2->data[0];
+	skb_pull(skb2, 1);
+	if (skb_pull(skb2, 4 * num_ids) == 0)
+		return 1;
+	return 0;
+}
+
+void kernel_packet(struct sk_buff *skb, __u32 packet_id)
+{
+	/* validation pass on a clone, before anything is consumed */
+	struct sk_buff *skb2 = skb_clone(skb, GFP_KERNEL);
+
+	if (unlikely(skb2 == 0)) {
+		kfree_skb(skb);
+		return;
+	}
+
+	while (skb2->len > 0) {
+		__u8 code = skb2->data[0];
+		__u16 length;
+
+		skb_pull(skb2, 1);
+		switch (code) {
+		case KP_PADDING:
+			break;
+		case KP_ACK1:
+		case KP_NACK1:
+			if (skb_pull(skb2, 4) == 0)
+				goto discard;
+			break;
+		case KP_ACK2:
+		case KP_NACK2:
+			if (skb_pull(skb2, 6) == 0)
+				goto discard;
+			break;
+		case KP_ACK3:
+		case KP_NACK3:
+			if (skb_pull(skb2, 4) == 0)
+				goto discard;
+			if (packet_id_def_length(skb2))
+				goto discard;
+			break;
+		case KP_ACK4:
+		case KP_NACK4:
+			if (skb_pull(skb2, 6) == 0)
+				goto discard;
+			if (packet_id_def_length(skb2))
+				goto discard;
+			break;
+		case KP_CONNECT:
+			if (packet_id_def_length(skb2))
+				goto discard;
+			break;
+		case KP_CONNECT_SUCCESS:
+			if (skb_pull(skb2, 4) == 0)
+				goto discard;
+			if (packet_id_def_length(skb2))
+				goto discard;
+			break;
+		case KP_CONNECT_FAILED_TEMP:
+		case KP_CONNECT_FAILED_PERM:
+			if (skb_pull(skb2, 4) == 0)
+				goto discard;
+			break;
+		case KP_FLUSH_CONN_BUF:
+			/* packet_id[4] offset[2] */
+			if (skb_pull(skb2, 6) == 0)
+				goto discard;
+			break;
+		case KP_CONN_DATA:
+			if (skb_pull(skb2, 4) == 0)
+				goto discard;
+			if (skb2->len < 2)
+				goto discard;
+			length = (((__u16)skb2->data[0]) << 8) | skb2->data[1];
+			skb_pull(skb2, 2);
+			if (skb_pull(skb2, length) == 0)
+				goto discard;
+			break;
+		case KP_CONN_EOF:
+		case KP_CONN_RESET_FW:
+		case KP_CONN_RESET_BW:
+			if (skb_pull(skb2, 4) == 0)
+				goto discard;
+			break;
+		default:
+			goto discard;
+		}
+	}
+	kfree_skb(skb2);
+	kernel_packet2(skb, packet_id);
+	kfree_skb(skb);
+	return;
+discard:
+	kfree_skb(skb2);
+	kfree_skb(skb);
+}
diff -u -r -N ./net/cor/qdisc.c ./net/cor/qdisc.c
--- ./net/cor/qdisc.c	1970-01-01 01:00:00.000000000 +0100
+++ ./net/cor/qdisc.c	2008-01-01 15:57:59.000000000 +0100
@@ -0,0 +1,195 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+
+#include "cor.h"
+#include "settings.h"
+
+struct cor_qdisc_opts{
+	/* none yet */
+};
+
+
+static struct sk_buff * cor_dequeue(struct Qdisc *sch)
+{
+	struct sk_buff *ret;
+
+	struct cor_sched_data *q = qdisc_priv(sch);
+
+	struct conn_qdisc_info *curr;
+	struct conn_qdisc_info *best = 0;
+
+	__u64 currcost_limit = 0;
+	__u64 currcost = 0;
+
+	spin_lock(&(q->lock)); /* irqsave??? */
+
+	/* the list is circular, the list head doubles as the sentinel */
+	for (curr = q->cql.first;
+			curr != (struct conn_qdisc_info *) &(q->cql);
+			curr = curr->le.next) {
+		__u32 max1 = (256 * ((__u64)curr->credits)) /
+				((__u64)curr->bytes_queued + curr->avg_rate);
+
+		__u32 max2 = (256 * ((__u64)curr->credits +
+				curr->credit_in - curr->credit_out)) /
+				((__u64)curr->bytes_queued + 2*curr->avg_rate);
+
+		/* __u32 values cannot go below 0, so min() is enough */
+		__u32 maxcost = min(max1, max2);
+
+		if (maxcost > currcost_limit) {
+			currcost = currcost_limit;
+			currcost_limit = maxcost;
+			best = curr;
+		}
+	}
+
+	if (best == 0) {
+		spin_unlock(&(q->lock));
+		return 0;
+	}
+
+	best->credits -= currcost;
+
+	ret = __skb_dequeue(&(best->queue));
+	spin_unlock(&(q->lock));
+
+	if (likely(ret != NULL)) {
+		sch->qstats.backlog -= ret->len;
+		sch->q.qlen--;
+	}
+
+	return ret;
+}
+
+
+static void activate_conn(struct cor_sched_data *q, struct conn_qdisc_info *qi)
+{
+	if (unlikely(qi->qdisc_active == 0)) {
+		spin_lock(&(q->lock));
+
+		/* insert at the tail of the circular list */
+		qi->le.next = (struct conn_qdisc_info *) &(q->cql);
+		qi->le.prev = q->cql.last;
+		/* if the list was empty this writes q->cql.first */
+		q->cql.last->le.next = qi;
+		q->cql.last = qi;
+		qi->qdisc_active = 1;
+
+		spin_unlock(&(q->lock));
+	}
+}
+
+static int cor_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct cor_sched_data *q = qdisc_priv(sch);
+	struct conn_qdisc_info *qi;
+
+	BUG_TRAP(skb_pstate(&skb)->conn->targettype == TARGET_OUT);
+
+	qi = &(skb_pstate(&skb)->conn->target.out.qi);
+
+	spin_lock(&(qi->lock));
+
+	__skb_queue_tail(&(qi->queue), skb);
+	activate_conn(q, qi);
+
+	spin_unlock(&(qi->lock));
+
+	sch->bstats.bytes += skb->len;
+	sch->bstats.packets++;
+	sch->q.qlen++;
+
+	return NET_XMIT_SUCCESS;
+}
+
+static int cor_requeue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct cor_sched_data *q = qdisc_priv(sch);
+	struct conn_qdisc_info *qi;
+
+	BUG_TRAP(skb_pstate(&skb)->conn->targettype == TARGET_OUT);
+
+	qi = &(skb_pstate(&skb)->conn->target.out.qi);
+
+	spin_lock(&(qi->lock));
+
+	__skb_queue_head(&(qi->queue), skb);
+	activate_conn(q, qi);
+
+	spin_unlock(&(qi->lock));
+
+	sch->qstats.requeues++;
+	sch->q.qlen++;
+
+	return NET_XMIT_SUCCESS;
+}
+
+static void init_scheddata(struct cor_sched_data *q)
+{
+	/* empty list: "first" and "last" point to the list head (see cor.h) */
+	q->cql.first = (struct conn_qdisc_info *) &(q->cql);
+	q->cql.last = (struct conn_qdisc_info *) &(q->cql);
+	#warning TODO
+}
+
+static void cor_reset(struct Qdisc *sch)
+{
+	#warning TODO terminate connections, free all packets and delete all connection stats
+}
+
+static void cor_destroy(struct Qdisc *sch)
+{
+	/* free resources */
+}
+
+static int cor_change(struct Qdisc *sch, struct rtattr *opt)
+{
+	struct cor_qdisc_opts *ctl = RTA_DATA(opt);
+	if (RTA_PAYLOAD(opt) < sizeof(*ctl))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int cor_init(struct Qdisc *sch, struct rtattr *opt)
+{
+	struct cor_sched_data *q = qdisc_priv(sch);
+	init_scheddata(q);
+	spin_lock_init( &(q->lock) );
+	return cor_change(sch, opt);
+}
+
+static int cor_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+	/*struct red_sched_data *q = qdisc_priv(sch);
+	struct tc_red_xstats st = {
+		.early	= q->stats.prob_drop + q->stats.forced_drop,
+		.pdrop	= q->stats.pdrop,
+		.other	= q->stats.other,
+		.marked	= q->stats.prob_mark + q->stats.forced_mark,
+	};
+
+	return gnet_stats_copy_app(d, &st, sizeof(st));*/
+	return 0;
+}
+
+static struct Qdisc_ops cor_qdisc_ops = {
+	.id		=	"cor",
+	.priv_size	=	sizeof(struct cor_sched_data),
+	.cl_ops		=	0,
+	.enqueue	=	cor_enqueue,
+	.dequeue	=	cor_dequeue,
+	.requeue	=	cor_requeue,
+	.drop		=	0,
+	.init		=	cor_init,
+	.reset		=	cor_reset,
+	.destroy	=	cor_destroy,
+	.change		=	cor_change,
+	.dump_stats	=	cor_dump_stats,
+	.owner		=	THIS_MODULE,
+};
+
+static int __init cor_module_init(void)
+{
+	return register_qdisc(&cor_qdisc_ops);
+}
+
+static void __exit cor_module_exit(void)
+{
+	unregister_qdisc(&cor_qdisc_ops);
+}
+
+module_init(cor_module_init);
+module_exit(cor_module_exit);
+
+MODULE_LICENSE("GPL");
diff -u -r -N ./net/cor/rcv.c ./net/cor/rcv.c
--- ./net/cor/rcv.c	1970-01-01 01:00:00.000000000 +0100
+++ ./net/cor/rcv.c	2008-01-01 15:57:59.000000000 +0100
@@ -0,0 +1,349 @@
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/in.h>
+
+
+#include "cor.h"
+#include "settings.h"
+
+
+struct packet_queue{
+	/*
+	 * This is a singly linked list!
+	 */
+	struct sk_buff *head;
+	struct sk_buff *tail;
+
+	spinlock_t lock;
+
+	wait_queue_head_t readers;
+};
+
+struct packet_queue rcv_queue;
+
+struct htable pid_table;
+
+atomic_t ooo_packets = ATOMIC_INIT(0);
+
+#warning todo right side of conn?
+int put_packet_id_def(struct sk_buff *skb, struct conn *conn, int spaceleft)
+{
+	#warning todo
+	return 0;
+}
+
+static void local_delivery(struct sk_buff *skb)
+{
+	#warning todo
+	#warning multiple chained skbs
+/*	struct skb_procstate *ps=skb_pstate(&(skb));
+	struct cor_sock *sk=&(ps->funcstate.target.sock);
+	#error
+	__skb_queue_tail(&sk->sk_receive_queue, skb);
+	sk->sk_data_ready(sk, skb->length);*/
+}
+
+static void send_rbuf(struct conn *conn)
+{
+	struct resize_buf *rbuf = &(conn->target.out.rb);
+	int targetmss = mss(conn->target.out.nb);
+
+	while (rbuf->totalsize >= targetmss) {
+		struct sk_buff *newskb = create_packet(conn, targetmss,
+				GFP_ATOMIC);
+		int totalcopy;
+		for (totalcopy = 0; totalcopy < targetmss; ) {
+			struct sk_buff *skb = rbuf->skb_first;
+			int copy = min_t(int, skb->len, targetmss - totalcopy);
+
+			/* read before pulling - skb_pull returns the pointer
+			 * *after* the pulled data */
+			char *src = skb->data;
+			char *dest = skb_put(newskb, copy);
+
+			memcpy(dest, src, copy);
+			skb_pull(skb, copy);
+
+			totalcopy += copy;
+
+			if (skb->len == 0) {
+				rbuf->skb_first = skb->next;
+				kfree_skb(skb);
+			}
+		}
+
+		send_packet(newskb);
+		rbuf->totalsize -= targetmss;
+	}
+}
+
+static void add_to_rbuf(struct sk_buff *skb, struct resize_buf *rbuf)
+{
+	if (rbuf->skb_first == 0) {
+		rbuf->skb_first = skb;
+		rbuf->skb_last = skb;
+	} else {
+		rbuf->skb_last->next = skb;
+		rbuf->skb_last = skb;
+	}
+
+	rbuf->totalsize += skb->len;
+
+	while (rbuf->skb_last->next != 0) {
+		rbuf->totalsize += rbuf->skb_last->next->len;
+		rbuf->skb_last = rbuf->skb_last->next;
+	}
+}
+
+static void resize(struct sk_buff *skb)
+{
+	unsigned long iflags;
+	struct conn *conn = skb_pstate(&skb)->conn;
+	struct resize_buf *rbuf = &(conn->target.out.rb);
+	int targetmss = mss(conn->target.out.nb);
+
+	spin_lock_irqsave(&(rbuf->lock), iflags);
+
+	if (rbuf->skb_first == 0 && skb->len == targetmss) {
+		send_packet(skb);
+		goto out;
+	}
+
+	add_to_rbuf(skb, rbuf);
+	send_rbuf(conn);
+
+out:
+	spin_unlock_irqrestore(&(rbuf->lock), iflags);
+}
+
+static void apply_cm(struct control_msg_in *cm, struct conn *conn)
+{
+	#warning todo
+}
+
+static void routing(struct packet_id_in *in)
+{
+	struct sk_buff *skb = in->rcv.skb;
+	struct skb_procstate *ps = skb_pstate(&(skb));
+
+	if (unlikely(PIDIN_CM == in->type)) {
+		apply_cm(in->rcv.cm, in->conn);
+		return;
+	}
+
+	switch (ps->conn->targettype) {
+	case TARGET_UNBOUND:
+	case TARGET_RANDOMPORT:
+	case TARGET_CHOOSENPORT:
+		#warning todo
+		break;
+	case TARGET_SOCK:
+		local_delivery(skb);
+		break;
+	case TARGET_OUT:
+		resize(skb);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void drain_ooo_queue(struct conn *conn)
+{
+	struct packet_id_in *curr = conn->source.in.next_packet_id;
+
+	while ((PIDIN_SKB == curr->type && 0 != curr->rcv.skb) ||
+			(PIDIN_CM == curr->type && 0 != curr->rcv.cm)) {
+
+		struct packet_id_in *old;
+
+		#warning todo curr == 0?
+
+		conn->source.in.ooo_packets--;
+
+		routing(curr);
+
+		old = curr;
+		curr = curr->next;
+		kfree(old);
+	}
+
+	BUG_TRAP(PIDIN_SKB == curr->type || PIDIN_CM == curr->type);
+	BUG_TRAP(SOURCE_IN == curr->conn->sourcetype);
+
+	curr->conn->source.in.next_packet_id = curr;
+}
+
+static int ooo_rcv(struct packet_id_in *in)
+{
+	#warning global ooo packets counter
+	if (unlikely(PIDIN_SKB == in->type &&
+			in->conn->source.in.ooo_packets >=
+			MAX_TOTAL_OOO_PER_CONN)) {
+		kfree_skb(in->rcv.skb);
+		kfree(in);
+		return 1;
+	} else if (unlikely(PIDIN_CM == in->type &&
+			in->conn->source.in.ooo_packets >=
+			MAX_TOTAL_OOO_PER_CONN + 20)) {
+		kfree(in->rcv.cm);
+		kfree(in);
+		return 1;
+	}
+
+	in->conn->source.in.ooo_packets++;
+	return 0;
+}
+
+static void reordering(struct packet_id_in *in)
+{
+	unsigned long iflags;
+
+	BUG_TRAP(in->conn != 0);
+	BUG_TRAP(SOURCE_IN == in->conn->sourcetype);
+
+	spin_lock_irqsave( &(in->conn->source.in.reorder_lock), iflags );
+
+	if (in->conn->source.in.next_packet_id != in) {
+		int rc = ooo_rcv(in);
+		if (rc == 0)
+			send_ack(in->conn->source.in.nb, in->packet_id);
+		goto out;
+	}
+
+	send_ack(in->conn->source.in.nb, in->packet_id);
+	routing(in);
+	drain_ooo_queue(in->conn);
+
+out:
+	spin_unlock_irqrestore( &(in->conn->source.in.reorder_lock), iflags );
+}
+
+static void conn_rcv(struct packet_id_in *in)
+{
+	int drop = 0;
+
+	if (drop) {
+		kfree_skb(in->rcv.skb);
+		kfree(in);
+		return;
+	}
+
+	reordering(in);
+}
+
+static void announce_packet(struct sk_buff *skb)
+{
+	#warning todo
+}
+
+static void rcv(struct sk_buff *skb)
+{
+	struct skb_procstate *ps = skb_pstate(&skb);
+	__u32 pid;
+	struct packet_id_in *pid_in;
+
+	if (skb->len < 4)
+		goto drop;
+
+	/* packet ids are sent in network order (see create_packet) */
+	pid = ntohl(*((__u32 *)skb->data));
+	skb_pull(skb, 4);
+
+	if (unlikely(0 == pid)) {
+		announce_packet(skb);
+		return;
+	}
+
+	pid_in = get_packet_id(pid);
+
+	if (unlikely(0 == pid_in))
+		goto drop;
+
+	ps->conn = pid_in->conn;
+
+	if (unlikely(0 == ps->conn))
+		goto drop;
+
+	if (unlikely(TARGET_KERNEL == ps->conn->targettype)) {
+#warning at the moment kernel packets are not reordered properly
+		kernel_packet(skb, pid_in->packet_id);
+		kfree(pid_in);
+		return;
+	}
+
+	pid_in->type = PIDIN_SKB;
+	pid_in->rcv.skb = skb;
+	conn_rcv(pid_in);
+
+	if (0) {
+drop:
+		kfree_skb(skb);
+	}
+}
+
+static void rcv_thread(void)
+{
+	while (1) {
+		struct sk_buff *skb = 0;
+
+		unsigned long iflags;
+		spin_lock_irqsave( &(rcv_queue.lock), iflags );
+
+		if (0 == rcv_queue.head) {
+			spin_unlock_irqrestore( &(rcv_queue.lock), iflags );
+			wait_event_interruptible(rcv_queue.readers,
+					rcv_queue.head != 0);
+			continue;
+		}
+
+		skb = rcv_queue.head;
+
+		rcv_queue.head = rcv_queue.head->next;
+		if (0 == rcv_queue.head)
+			rcv_queue.tail = 0;
+
+		spin_unlock_irqrestore( &(rcv_queue.lock), iflags );
+
+		rcv(skb);
+	}
+}
+
+
+static int queue_rcv_processing(struct sk_buff *skb, struct net_device *dev,
+		struct packet_type *pt, struct net_device *orig_dev)
+{
+	unsigned long iflags;
+
+	BUG_TRAP(skb->next == 0);
+
+	spin_lock_irqsave( &(rcv_queue.lock), iflags );
+
+	if (0 == rcv_queue.tail) {
+		rcv_queue.head = rcv_queue.tail = skb;
+	} else {
+		rcv_queue.tail->next = skb;
+		rcv_queue.tail = skb;
+	}
+
+	spin_unlock_irqrestore( &(rcv_queue.lock), iflags );
+
+	wake_up_interruptible(&(rcv_queue.readers));
+
+	return skb->len;
+}
+
+
+static struct packet_type ptype_cor = {
+	.type = __constant_htons(ETH_P_COR),
+	.dev = 0,
+	.func = queue_rcv_processing
+};
+
+int init_module(void)
+{
+	spin_lock_init( &(rcv_queue.lock) );
+	init_waitqueue_head( &(rcv_queue.readers) );
+	init_htable(&pid_table);
+	dev_add_pack(&ptype_cor);
+	#warning todo start rcv_thread
+	return 0;
+}
+
+MODULE_LICENSE("GPL");
diff -u -r -N ./net/cor/settings.h ./net/cor/settings.h
--- ./net/cor/settings.h	1970-01-01 01:00:00.000000000 +0100
+++ ./net/cor/settings.h	2008-01-01 15:57:59.000000000 +0100
@@ -0,0 +1,5 @@
+#define MAX_TOTAL_OOO_PACKETS 100
+#define MAX_TOTAL_OOO_PER_CONN 10
+#define PACKET_IDS_ON_CONNECT 32
+
+#define MAX_TOTAL_BUFFER_SIZE 256
diff -u -r -N ./net/cor/snd.c ./net/cor/snd.c
--- ./net/cor/snd.c	1970-01-01 01:00:00.000000000 +0100
+++ ./net/cor/snd.c	2008-01-01 15:57:59.000000000 +0100
@@ -0,0 +1,265 @@
+#include <linux/gfp.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "cor.h"
+#include "settings.h"
+
+struct htable retransmits;
+
+static inline int retransmit_keyoffset(void)
+{
+	struct sk_buff tmp;
+	struct skb_procstate tmp2;
+
+	return ((char *)(&(tmp.cb)) - (char *)(&tmp)) +
+			((char *)(&(tmp2.funcstate.retransmit_queue.sent_pid)) -
+			(char *)(&tmp2));
+}
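+
+/*
+ * Equivalent, sketched with offsetof (assumes skb->cb holds the
+ * skb_procstate, as skb_pstate() in cor.h does):
+ *
+ *	offsetof(struct sk_buff, cb) + offsetof(struct skb_procstate,
+ *			funcstate.retransmit_queue.sent_pid)
+ */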
+
+
+static int clear_retransmit(__u32 packet_id)
+{
+	/* must be 0 so that search_htab() pops instead of inserting */
+	struct sk_buff *ret = 0;
+
+	struct skb_procstate *ps;
+
+	search_htab(&retransmits, packet_id, retransmit_keyoffset(),
+			(struct htab_entry **)&ret);
+
+	if (ret == 0)
+		return 1;
+
+	ps = skb_pstate(&(ret));
+	dec_connrefs(ps->conn);
+	kfree_skb(ret);
+	
+	return 0;
+}
+
+static void send_retransmit(struct sk_buff *skb)
+{
+	struct skb_procstate *ps = skb_pstate(&(skb));
+	struct neighbor *nb = ps->conn->target.out.nb;
+
+	/* move from the head to the tail of the retransmit queue */
+	nb->first_retransmit = ps->funcstate.retransmit_queue.next;
+	if (0 != nb->first_retransmit)
+		skb_pstate(&(nb->first_retransmit))->
+				funcstate.retransmit_queue.prev = 0;
+	ps->funcstate.retransmit_queue.next = 0;
+	ps->funcstate.retransmit_queue.prev = nb->last_retransmit;
+	if (0 != nb->last_retransmit)
+		skb_pstate(&(nb->last_retransmit))->
+				funcstate.retransmit_queue.next = skb;
+	nb->last_retransmit = skb;
+
+	ps->funcstate.retransmit_queue.timeout = jiffies + nb->latency;
+}
+
+static void retransmit_timerfunc(unsigned long arg)
+{
+	unsigned long iflags;
+
+	struct neighbor *nb = (struct neighbor *) arg;
+	struct sk_buff *skb;
+	struct skb_procstate *ps = 0;
+	__u32 timeout;
+
+	spin_lock_irqsave( &(nb->retrans_lock), iflags );
+
+	skb = nb->first_retransmit;
+
+	if (0 == skb)
+		goto out;
+
+	ps = skb_pstate(&(skb));
+	timeout = ps->funcstate.retransmit_queue.timeout;
+
+	if (time_before_eq(timeout, jiffies))
+		send_retransmit(skb);
+
+	mod_timer(&(nb->retrans_timer), timeout);
+
+out:
+	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
+}
+
+
+static void sched_retransmit(struct sk_buff *skb)
+{
+	unsigned long iflags;
+
+	struct sk_buff *skb2 = skb_clone(skb, GFP_KERNEL);
+	struct skb_procstate *ps = skb_pstate(&(skb2));
+	__u32 packet_id = ps->funcstate.retransmit_queue.sent_pid;
+	struct neighbor *nb = ps->conn->target.out.nb;
+
+	int ret = search_htab(&retransmits, packet_id, retransmit_keyoffset(),
+			(struct htab_entry **)&skb2);
+	BUG_TRAP(0 == ret);
+
+	spin_lock_irqsave( &(nb->retrans_lock), iflags );
+
+	ps->funcstate.retransmit_queue.timeout = jiffies + nb->latency;
+	ps->funcstate.retransmit_queue.next = 0;
+	ps->funcstate.retransmit_queue.prev = nb->last_retransmit;
+	if (0 != nb->last_retransmit)
+		skb_pstate(&(nb->last_retransmit))->
+				funcstate.retransmit_queue.next = skb2;
+	nb->last_retransmit = skb2;
+	if (unlikely(0 == nb->first_retransmit)) {
+		nb->first_retransmit = skb2;
+		mod_timer(&(nb->retrans_timer),
+				ps->funcstate.retransmit_queue.timeout);
+	}
+
+	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
+}
+
+static void pop_pid(struct conn *conn, __u32 *packet_id)
+{
+	__u32 *packet_id_ptr;
+
+	struct packet_id_out *pid_out = conn->target.out.next_pid;
+
+	if (0 == pid_out) {
+		*packet_id = 0;
+		return;
+	}
+
+	BUG_TRAP(pid_out->num_ids != 0);
+
+	packet_id_ptr = (__u32 *)
+				(((char *) pid_out) +
+				((sizeof(struct packet_id_out) + 3) / 4) * 4);
+	packet_id_ptr += pid_out->offset;
+	*packet_id = *packet_id_ptr;
+
+	pid_out->offset++;
+	pid_out->num_ids--;
+
+	if (unlikely(0 == pid_out->num_ids)) {
+		conn->target.out.next_pid = pid_out->next;
+		if (unlikely(conn->target.out.last_pid == pid_out)) {
+			BUG_TRAP(pid_out->next == 0);
+			conn->target.out.last_pid = 0;
+		}
+		#warning todo free pid_out
+	}
+}
+
+static void check_send_packet_id(struct conn *target,
+		struct packet_id_out *pid_out, int *packet_id_sent)
+{
+	if (unlikely(0 == pid_out->next && 1 == pid_out->num_ids &&
+			0 == *packet_id_sent)) {
+		__u32 packet_id;
+		pop_pid(target, &packet_id);
+		#warning todo send packet id definition
+	}
+}
+
+static void next_pid(struct conn *conn, __u32 *packet_id, int *packet_id_sent)
+{
+	unsigned long iflags;
+
+	struct packet_id_out *pid_out;
+
+	*packet_id = 0;
+
+	spin_lock_irqsave( &(conn->target.out.pid_lock), iflags );
+
+	*packet_id_sent = conn->target.out.packet_id_sent;
+	pid_out = conn->target.out.next_pid;
+
+	if (0 == pid_out)
+		goto out;
+
+	check_send_packet_id(conn, pid_out, packet_id_sent);
+
+	pop_pid(conn, packet_id);
+out:
+	spin_unlock_irqrestore( &(conn->target.out.pid_lock), iflags );
+
+	if (unlikely(*packet_id == 0 && *packet_id_sent == 0)) {
+		#warning todo log error + reset connection
+		/* (in a way that doesn't crash the caller) */
+	}
+}
+
+
+
+struct sk_buff * create_packet(struct conn *target, int size, int alloc_flags)
+{
+	struct sk_buff *ret = 0;
+
+	struct neighbor *nb = target->target.out.nb;
+	struct net_device *dev = nb->dev;
+
+	__u32 packet_id = 0;
+	int packet_id_sent;
+
+	__u32 *dest;
+
+	#warning todo
+	/*if(!(target->state & TARGET_STATEMASK_READY))
+		return 0;*/
+
+	next_pid(target, &packet_id, &packet_id_sent);
+
+	if (0 == packet_id) {
+		/* ??? */
+	}
+
+
+	/*ret = alloc_skb(size + dev->hard_header_len, alloc_flags);
+	if(dev->hard_header){
+		if(unlikely(dev->hard_header(ret, dev, ETH_P_COR, nb->mac,
+				dev->dev_addr, ret->len) < 0)){
+
+			#warning error
+		}
+	}*/
+
+	dest = (__u32 *) skb_put(ret, 4);
+	BUG_TRAP(0 != dest);
+	*dest = htonl(packet_id);
+
+	return ret;
+}
+
+void send_packet(struct sk_buff *skb)
+{
+	struct skb_procstate *ps = skb_pstate(&skb);
+	BUG_TRAP(TARGET_OUT == ps->conn->targettype);
+
+	sched_retransmit(skb);
+	#warning xmit
+}
+
+struct conn *ack_received(struct neighbor *nb, __u32 packet_id, int nack)
+{
+	unsigned long iflags;
+
+	struct sk_buff *skb = 0;
+	struct skb_procstate *ps = 0;
+	struct conn *conn = 0;
+
+	int ret = search_htab(&retransmits, packet_id, retransmit_keyoffset(),
+			(struct htab_entry **)&skb);
+
+	if (0 == ret)
+		return 0;
+
+	ps = skb_pstate(&(skb));
+	conn = ps->conn;
+	if (nb != conn->target.out.nb) {
+		#warning todo bogus packet received
+	}
+
+	spin_lock_irqsave( &(nb->retrans_lock), iflags );
+
+	if (unlikely(nb->first_retransmit == skb)) {
+		if (unlikely(nb->last_retransmit == skb)) {
+			nb->first_retransmit = nb->last_retransmit = 0;
+		} else {
+			/* the queue is linked via the procstate, the sk_buff
+			 * next/prev fields belong to the hashtable */
+			nb->first_retransmit =
+					ps->funcstate.retransmit_queue.next;
+			mod_timer(&(nb->retrans_timer), jiffies + nb->latency);
+		}
+	}
+
+	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
+
+	kfree_skb(skb);
+
+	return conn;
+}
+
+MODULE_LICENSE("GPL");
diff -u -r -N ./net/cor/sock.c ./net/cor/sock.c
--- ./net/cor/sock.c	1970-01-01 01:00:00.000000000 +0100
+++ ./net/cor/sock.c	2008-01-01 15:57:59.000000000 +0100
@@ -0,0 +1,138 @@
+#include <linux/module.h>
+#include <net/sock.h>
+#include <linux/net.h>
+
+#include "cor.h"
+
+int cor_socket_release(struct socket *sock)
+{
+	return 0;
+}
+
+int cor_socket_bind(struct socket *sock, struct sockaddr *myaddr,
+		int sockaddr_len)
+{
+	return 0;
+}
+
+int cor_socket_connect(struct socket *sock, struct sockaddr *vaddr,
+		int sockaddr_len, int flags)
+{
+	return 0;
+}
+
+int cor_socket_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+	return 0;
+}
+
+int cor_socket_listen(struct socket *sock, int len)
+{
+	return 0;
+}
+
+int cor_socket_shutdown(struct socket *sock, int flags)
+{
+	return 0;
+}
+
+int cor_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+	return 0;
+}
+
+int cor_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
+		size_t total_len)
+{
+	#warning TODO
+	int copy = 0;
+	int err = 0;
+
+	struct conn *conn = (struct conn *) sock->sk;
+
+	/*long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);*/
+
+	/* the send loop itself is still todo: */
+	//while (1) {
+		//struct sk_buff *skb = sk->sk_write_queue.next;
+
+		//if(skb)
+		//	goto copy;
+
+		//if((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+		//	goto out;
+
+		//skb=create_new_packet(target);
+		//if(skb == 0)
+//copy:
+	//}
+
+//out:
+	//sk->sk_wmem_queued+=copy;
+	if (copy)
+		return copy;
+	return err;
+}
+
+int cor_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
+		size_t total_len, int flags)
+{
+	int nonblock = flags & MSG_DONTWAIT;
+	struct conn *sk = (struct conn *) sock->sk;
+	size_t copied = 0;
+	/*while(copied < total_len){
+		if (skb != 0) {
+			#warning lock?
+			struct sk_buff *skb = __skb_dequeue(&(best->queue));
+			#warning copied???
+			#warning ???
+			skb_copy_datagram_iovec(skb, 0, m->msg_iov, skb->len);
+		} else if(nonblock || (copied > 0)) {
+			break;
+		} else {
+			wait_event_interruptible(rcv_queue.readers,
+					rcv_queue.head != 0);
+		}
+
+	}*/
+	return (copied != 0) ? copied : (-EAGAIN);
+}
+
+const struct proto_ops cor_proto_ops = {
+		//.family =
+		.owner = THIS_MODULE,
+		.release = cor_socket_release,
+		.bind = cor_socket_bind,
+		.connect = cor_socket_connect,
+		.accept = cor_socket_accept,
+		.listen = cor_socket_listen,
+		.shutdown = cor_socket_shutdown,
+		.ioctl = cor_ioctl,
+		.sendmsg = cor_sendmsg,
+		.recvmsg = cor_recvmsg
+};
+
+
+int cor_createsock(struct socket *sock, int protocol)
+{
+	struct conn *conn_rcv = 0;
+	struct conn *conn_snd = 0;
+
+	#warning todo
+	//conn_rcv = sk_alloc(PF_INET, GFP_KERNEL, &cor_proto_ops, 1);
+	if (conn_rcv == NULL)
+		goto out;
+
+	//conn_snd = sk_alloc(PF_INET, GFP_KERNEL, &cor_proto_ops, 1);
+	if (conn_snd == NULL)
+		goto out;
+
+	/*socket->ops = cor_proto_ops*/
+out:
+
+	return 0;
+}
+
+
+const struct net_proto_family cor_net_proto_family = {
+//	.family=,
+	.create = cor_createsock,
+//	.authentication = 0,
+//	.encryption = 0,
+//	.encrypt_net = 0,
+	.owner = THIS_MODULE
+};
+
+MODULE_LICENSE("GPL");
+
